Update dependencies and tests

This commit updates dependencies and vendored files, and adjusts the tests
to account for changes in the DB driver.

Signed-off-by: Gabriel Adrian Samfira <gsamfira@cloudbasesolutions.com>
Gabriel Adrian Samfira 2024-04-22 13:38:51 +00:00
parent 069bdd8b6b
commit 97d03dd38d
693 changed files with 86307 additions and 28214 deletions
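Nearly every test change below follows one mechanical pattern: the gorm.io/gorm bump in this commit's go.mod (v1.25.5 to v1.25.9) changes how the LIMIT clause is rendered. The limit is now bound as a placeholder (`LIMIT ?`) instead of being inlined (`LIMIT 1`), so each sqlmock expectation must match the new SQL text and pass the limit value as an extra query argument. A minimal, self-contained sketch of the before/after expectation, using the gopkg.in/DATA-DOG/go-sqlmock.v1 package from go.mod; the table, column, and values here are illustrative, not taken from the commit:

```go
// Hedged sketch, not part of the commit: shows why every expectation
// gained a trailing "1" in WithArgs once GORM started binding LIMIT.
package main

import (
	"fmt"
	"regexp"

	sqlmock "gopkg.in/DATA-DOG/go-sqlmock.v1"
)

func main() {
	db, mock, err := sqlmock.New()
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Before: the limit was inlined in the SQL, so it was part of the
	// expected query string and not an argument:
	//   ExpectQuery(`... LIMIT 1`).WithArgs("example")
	//
	// After: GORM emits "LIMIT ?" and binds the limit, so the
	// expectation needs the extra argument:
	mock.ExpectQuery(regexp.QuoteMeta(
		"SELECT * FROM `enterprises` WHERE name = ? LIMIT ?")).
		WithArgs("example", 1).
		WillReturnRows(sqlmock.NewRows([]string{"name"}).AddRow("example"))

	// Stand-in for the query GORM would issue against the mocked DB.
	var name string
	err = db.QueryRow(
		"SELECT * FROM `enterprises` WHERE name = ? LIMIT ?",
		"example", 1,
	).Scan(&name)
	if err != nil {
		panic(err)
	}
	fmt.Println(name) // example

	// Fails the test if any declared expectation was not matched.
	if err := mock.ExpectationsWereMet(); err != nil {
		panic(err)
	}
}
```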


@@ -244,8 +244,8 @@ func (s *EnterpriseTestSuite) TestGetEnterpriseNotFound() {
 func (s *EnterpriseTestSuite) TestGetEnterpriseDBDecryptingErr() {
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE name = ? COLLATE NOCASE AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Enterprises[0].Name).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE name = ? COLLATE NOCASE AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Enterprises[0].Name, 1).
 WillReturnRows(sqlmock.NewRows([]string{"name"}).AddRow(s.Fixtures.Enterprises[0].Name))
 _, err := s.StoreSQLMocked.GetEnterprise(context.Background(), s.Fixtures.Enterprises[0].Name)
@@ -292,8 +292,8 @@ func (s *EnterpriseTestSuite) TestDeleteEnterpriseInvalidEnterpriseID() {
 func (s *EnterpriseTestSuite) TestDeleteEnterpriseDBDeleteErr() {
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Enterprises[0].ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Enterprises[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
 s.Fixtures.SQLMock.ExpectBegin()
 s.Fixtures.SQLMock.
@@ -328,8 +328,8 @@ func (s *EnterpriseTestSuite) TestUpdateEnterpriseDBEncryptErr() {
 s.StoreSQLMocked.cfg.Passphrase = wrongPassphrase
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Enterprises[0].ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Enterprises[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
 _, err := s.StoreSQLMocked.UpdateEnterprise(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.UpdateRepoParams)
@@ -341,8 +341,8 @@ func (s *EnterpriseTestSuite) TestUpdateEnterpriseDBEncryptErr() {
 func (s *EnterpriseTestSuite) TestUpdateEnterpriseDBSaveErr() {
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Enterprises[0].ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Enterprises[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
 s.Fixtures.SQLMock.ExpectBegin()
 s.Fixtures.SQLMock.
@@ -362,8 +362,8 @@ func (s *EnterpriseTestSuite) TestUpdateEnterpriseDBDecryptingErr() {
 s.Fixtures.UpdateRepoParams.WebhookSecret = webhookSecret
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Enterprises[0].ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Enterprises[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
 _, err := s.StoreSQLMocked.UpdateEnterprise(context.Background(), s.Fixtures.Enterprises[0].ID, s.Fixtures.UpdateRepoParams)
@@ -389,8 +389,8 @@ func (s *EnterpriseTestSuite) TestGetEnterpriseByIDInvalidEnterpriseID() {
 func (s *EnterpriseTestSuite) TestGetEnterpriseByIDDBDecryptingErr() {
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Enterprises[0].ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Enterprises[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
 s.Fixtures.SQLMock.
 ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`enterprise_id` = ? AND `pools`.`deleted_at` IS NULL")).
@@ -446,11 +446,11 @@ func (s *EnterpriseTestSuite) TestCreateEnterprisePoolInvalidEnterpriseID() {
 func (s *EnterpriseTestSuite) TestCreateEnterprisePoolDBCreateErr() {
 s.Fixtures.SQLMock.ExpectBegin()
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Enterprises[0].ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Enterprises[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and enterprise_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and enterprise_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
 WillReturnError(fmt.Errorf("mocked creating pool error"))
 entity, err := s.Fixtures.Enterprises[0].GetEntity()
@@ -465,16 +465,16 @@ func (s *EnterpriseTestSuite) TestCreateEnterprisePoolDBCreateErr() {
 func (s *EnterpriseTestSuite) TestCreateEnterpriseDBPoolAlreadyExistErr() {
 s.Fixtures.SQLMock.ExpectBegin()
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Enterprises[0].ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Enterprises[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and enterprise_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and enterprise_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
 WithArgs(
 s.Fixtures.CreatePoolParams.ProviderName,
 s.Fixtures.CreatePoolParams.Image,
 s.Fixtures.CreatePoolParams.Flavor,
-s.Fixtures.Enterprises[0].ID).
+s.Fixtures.Enterprises[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"enterprise_id", "provider_name", "image", "flavor"}).
 AddRow(
 s.Fixtures.Enterprises[0].ID,
@@ -494,19 +494,19 @@ func (s *EnterpriseTestSuite) TestCreateEnterpriseDBPoolAlreadyExistErr() {
 func (s *EnterpriseTestSuite) TestCreateEnterprisePoolDBFetchTagErr() {
 s.Fixtures.SQLMock.ExpectBegin()
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Enterprises[0].ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Enterprises[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and enterprise_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and enterprise_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
 WithArgs(
 s.Fixtures.CreatePoolParams.ProviderName,
 s.Fixtures.CreatePoolParams.Image,
 s.Fixtures.CreatePoolParams.Flavor,
-s.Fixtures.Enterprises[0].ID).
+s.Fixtures.Enterprises[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"enterprise_id"}))
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
 WillReturnError(fmt.Errorf("mocked fetching tag error"))
 entity, err := s.Fixtures.Enterprises[0].GetEntity()
@@ -522,19 +522,19 @@ func (s *EnterpriseTestSuite) TestCreateEnterprisePoolDBAddingPoolErr() {
 s.Fixtures.CreatePoolParams.Tags = []string{"linux"}
 s.Fixtures.SQLMock.ExpectBegin()
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Enterprises[0].ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Enterprises[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and enterprise_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and enterprise_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
 WithArgs(
 s.Fixtures.CreatePoolParams.ProviderName,
 s.Fixtures.CreatePoolParams.Image,
 s.Fixtures.CreatePoolParams.Flavor,
-s.Fixtures.Enterprises[0].ID).
+s.Fixtures.Enterprises[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"enterprise_id"}))
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
 WillReturnRows(sqlmock.NewRows([]string{"linux"}))
 s.Fixtures.SQLMock.
 ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).
@@ -558,19 +558,19 @@ func (s *EnterpriseTestSuite) TestCreateEnterprisePoolDBSaveTagErr() {
 s.Fixtures.SQLMock.ExpectBegin()
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Enterprises[0].ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Enterprises[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and enterprise_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and enterprise_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
 WithArgs(
 s.Fixtures.CreatePoolParams.ProviderName,
 s.Fixtures.CreatePoolParams.Image,
 s.Fixtures.CreatePoolParams.Flavor,
-s.Fixtures.Enterprises[0].ID).
+s.Fixtures.Enterprises[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"enterprise_id"}))
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
 WillReturnRows(sqlmock.NewRows([]string{"linux"}))
 s.Fixtures.SQLMock.
 ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).
@@ -597,19 +597,19 @@ func (s *EnterpriseTestSuite) TestCreateEnterprisePoolDBFetchPoolErr() {
 s.Fixtures.SQLMock.ExpectBegin()
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Enterprises[0].ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `enterprises` WHERE id = ? AND `enterprises`.`deleted_at` IS NULL ORDER BY `enterprises`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Enterprises[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Enterprises[0].ID))
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and enterprise_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and enterprise_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
 WithArgs(
 s.Fixtures.CreatePoolParams.ProviderName,
 s.Fixtures.CreatePoolParams.Image,
 s.Fixtures.CreatePoolParams.Flavor,
-s.Fixtures.Enterprises[0].ID).
+s.Fixtures.Enterprises[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"enterprise_id"}))
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
 WillReturnRows(sqlmock.NewRows([]string{"linux"}))
 s.Fixtures.SQLMock.
 ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).
@@ -628,7 +628,7 @@ func (s *EnterpriseTestSuite) TestCreateEnterprisePoolDBFetchPoolErr() {
 WillReturnResult(sqlmock.NewResult(1, 1))
 s.Fixtures.SQLMock.ExpectCommit()
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
 WillReturnRows(sqlmock.NewRows([]string{"id"}))
 entity, err := s.Fixtures.Enterprises[0].GetEntity()


@@ -204,8 +204,8 @@ func (s *InstancesTestSuite) TestCreateInstanceDBCreateErr() {
 pool := s.Fixtures.Pool
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
-WithArgs(pool.ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
+WithArgs(pool.ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(pool.ID))
 s.Fixtures.SQLMock.ExpectBegin()
 s.Fixtures.SQLMock.
@@ -283,12 +283,12 @@ func (s *InstancesTestSuite) TestDeleteInstanceDBRecordNotFoundErr() {
 instance := s.Fixtures.Instances[0]
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
-WithArgs(pool.ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
+WithArgs(pool.ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(pool.ID))
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instances` WHERE (name = ? and pool_id = ?) AND `instances`.`deleted_at` IS NULL ORDER BY `instances`.`id` LIMIT 1")).
-WithArgs(instance.Name, pool.ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instances` WHERE (name = ? and pool_id = ?) AND `instances`.`deleted_at` IS NULL ORDER BY `instances`.`id` LIMIT ?")).
+WithArgs(instance.Name, pool.ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(instance.ID))
 s.Fixtures.SQLMock.
 ExpectQuery(regexp.QuoteMeta("SELECT * FROM `addresses` WHERE `addresses`.`instance_id` = ? AND `addresses`.`deleted_at` IS NULL")).
@@ -320,12 +320,12 @@ func (s *InstancesTestSuite) TestDeleteInstanceDBDeleteErr() {
 instance := s.Fixtures.Instances[0]
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
-WithArgs(pool.ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
+WithArgs(pool.ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(pool.ID))
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instances` WHERE (name = ? and pool_id = ?) AND `instances`.`deleted_at` IS NULL ORDER BY `instances`.`id` LIMIT 1")).
-WithArgs(instance.Name, pool.ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instances` WHERE (name = ? and pool_id = ?) AND `instances`.`deleted_at` IS NULL ORDER BY `instances`.`id` LIMIT ?")).
+WithArgs(instance.Name, pool.ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(instance.ID))
 s.Fixtures.SQLMock.
 ExpectQuery(regexp.QuoteMeta("SELECT * FROM `addresses` WHERE `addresses`.`instance_id` = ? AND `addresses`.`deleted_at` IS NULL")).
@@ -373,8 +373,8 @@ func (s *InstancesTestSuite) TestAddInstanceEventDBUpdateErr() {
 statusMsg := "test-status-message"
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instances` WHERE name = ? AND `instances`.`deleted_at` IS NULL ORDER BY `instances`.`id` LIMIT 1")).
-WithArgs(instance.Name).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instances` WHERE name = ? AND `instances`.`deleted_at` IS NULL ORDER BY `instances`.`id` LIMIT ?")).
+WithArgs(instance.Name, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(instance.ID))
 s.Fixtures.SQLMock.
 ExpectQuery(regexp.QuoteMeta("SELECT * FROM `addresses` WHERE `addresses`.`instance_id` = ? AND `addresses`.`deleted_at` IS NULL")).
@@ -422,8 +422,8 @@ func (s *InstancesTestSuite) TestUpdateInstanceDBUpdateInstanceErr() {
 instance := s.Fixtures.Instances[0]
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instances` WHERE name = ? AND `instances`.`deleted_at` IS NULL ORDER BY `instances`.`id` LIMIT 1")).
-WithArgs(instance.Name).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instances` WHERE name = ? AND `instances`.`deleted_at` IS NULL ORDER BY `instances`.`id` LIMIT ?")).
+WithArgs(instance.Name, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(instance.ID))
 s.Fixtures.SQLMock.
 ExpectQuery(regexp.QuoteMeta("SELECT * FROM `addresses` WHERE `addresses`.`instance_id` = ? AND `addresses`.`deleted_at` IS NULL")).
@@ -454,8 +454,8 @@ func (s *InstancesTestSuite) TestUpdateInstanceDBUpdateAddressErr() {
 instance := s.Fixtures.Instances[0]
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instances` WHERE name = ? AND `instances`.`deleted_at` IS NULL ORDER BY `instances`.`id` LIMIT 1")).
-WithArgs(instance.Name).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `instances` WHERE name = ? AND `instances`.`deleted_at` IS NULL ORDER BY `instances`.`id` LIMIT ?")).
+WithArgs(instance.Name, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(instance.ID))
 s.Fixtures.SQLMock.
 ExpectQuery(regexp.QuoteMeta("SELECT * FROM `addresses` WHERE `addresses`.`instance_id` = ? AND `addresses`.`deleted_at` IS NULL")).
@@ -545,8 +545,8 @@ func (s *InstancesTestSuite) TestPoolInstanceCountDBCountErr() {
 pool := s.Fixtures.Pool
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
-WithArgs(pool.ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
+WithArgs(pool.ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(pool.ID))
 s.Fixtures.SQLMock.
 ExpectQuery(regexp.QuoteMeta("SELECT count(*) FROM `instances` WHERE pool_id = ? AND `instances`.`deleted_at` IS NULL")).


@@ -244,8 +244,8 @@ func (s *OrgTestSuite) TestGetOrganizationNotFound() {
 func (s *OrgTestSuite) TestGetOrganizationDBDecryptingErr() {
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE name = ? COLLATE NOCASE AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Orgs[0].Name).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE name = ? COLLATE NOCASE AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Orgs[0].Name, 1).
 WillReturnRows(sqlmock.NewRows([]string{"name"}).AddRow(s.Fixtures.Orgs[0].Name))
 _, err := s.StoreSQLMocked.GetOrganization(context.Background(), s.Fixtures.Orgs[0].Name)
@@ -292,8 +292,8 @@ func (s *OrgTestSuite) TestDeleteOrganizationInvalidOrgID() {
 func (s *OrgTestSuite) TestDeleteOrganizationDBDeleteErr() {
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Orgs[0].ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Orgs[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
 s.Fixtures.SQLMock.ExpectBegin()
 s.Fixtures.SQLMock.
@@ -328,8 +328,8 @@ func (s *OrgTestSuite) TestUpdateOrganizationDBEncryptErr() {
 s.StoreSQLMocked.cfg.Passphrase = wrongPassphrase
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Orgs[0].ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Orgs[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
 _, err := s.StoreSQLMocked.UpdateOrganization(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.UpdateRepoParams)
@@ -341,8 +341,8 @@ func (s *OrgTestSuite) TestUpdateOrganizationDBEncryptErr() {
 func (s *OrgTestSuite) TestUpdateOrganizationDBSaveErr() {
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Orgs[0].ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Orgs[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
 s.Fixtures.SQLMock.ExpectBegin()
 s.Fixtures.SQLMock.
@@ -362,8 +362,8 @@ func (s *OrgTestSuite) TestUpdateOrganizationDBDecryptingErr() {
 s.Fixtures.UpdateRepoParams.WebhookSecret = webhookSecret
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Orgs[0].ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Orgs[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
 _, err := s.StoreSQLMocked.UpdateOrganization(context.Background(), s.Fixtures.Orgs[0].ID, s.Fixtures.UpdateRepoParams)
@@ -389,8 +389,8 @@ func (s *OrgTestSuite) TestGetOrganizationByIDInvalidOrgID() {
 func (s *OrgTestSuite) TestGetOrganizationByIDDBDecryptingErr() {
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Orgs[0].ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Orgs[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
 s.Fixtures.SQLMock.
 ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`org_id` = ? AND `pools`.`deleted_at` IS NULL")).
@@ -446,11 +446,11 @@ func (s *OrgTestSuite) TestCreateOrganizationPoolInvalidOrgID() {
 func (s *OrgTestSuite) TestCreateOrganizationPoolDBCreateErr() {
 s.Fixtures.SQLMock.ExpectBegin()
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Orgs[0].ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Orgs[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and org_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and org_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
 WillReturnError(fmt.Errorf("mocked creating pool error"))
 entity, err := s.Fixtures.Orgs[0].GetEntity()
@@ -465,16 +465,16 @@ func (s *OrgTestSuite) TestCreateOrganizationPoolDBCreateErr() {
 func (s *OrgTestSuite) TestCreateOrganizationDBPoolAlreadyExistErr() {
 s.Fixtures.SQLMock.ExpectBegin()
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Orgs[0].ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Orgs[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and org_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and org_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
 WithArgs(
 s.Fixtures.CreatePoolParams.ProviderName,
 s.Fixtures.CreatePoolParams.Image,
 s.Fixtures.CreatePoolParams.Flavor,
-s.Fixtures.Orgs[0].ID).
+s.Fixtures.Orgs[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"org_id", "provider_name", "image", "flavor"}).
 AddRow(
 s.Fixtures.Orgs[0].ID,
@@ -494,19 +494,19 @@ func (s *OrgTestSuite) TestCreateOrganizationDBPoolAlreadyExistErr() {
 func (s *OrgTestSuite) TestCreateOrganizationPoolDBFetchTagErr() {
 s.Fixtures.SQLMock.ExpectBegin()
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Orgs[0].ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Orgs[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and org_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and org_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
 WithArgs(
 s.Fixtures.CreatePoolParams.ProviderName,
 s.Fixtures.CreatePoolParams.Image,
 s.Fixtures.CreatePoolParams.Flavor,
-s.Fixtures.Orgs[0].ID).
+s.Fixtures.Orgs[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"org_id"}))
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
 WillReturnError(fmt.Errorf("mocked fetching tag error"))
 entity, err := s.Fixtures.Orgs[0].GetEntity()
@@ -523,19 +523,19 @@ func (s *OrgTestSuite) TestCreateOrganizationPoolDBAddingPoolErr() {
 s.Fixtures.SQLMock.ExpectBegin()
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Orgs[0].ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Orgs[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and org_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and org_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
 WithArgs(
 s.Fixtures.CreatePoolParams.ProviderName,
 s.Fixtures.CreatePoolParams.Image,
 s.Fixtures.CreatePoolParams.Flavor,
-s.Fixtures.Orgs[0].ID).
+s.Fixtures.Orgs[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"org_id"}))
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
 WillReturnRows(sqlmock.NewRows([]string{"linux"}))
 s.Fixtures.SQLMock.
 ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).
@@ -559,19 +559,19 @@ func (s *OrgTestSuite) TestCreateOrganizationPoolDBSaveTagErr() {
 s.Fixtures.SQLMock.ExpectBegin()
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Orgs[0].ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Orgs[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and org_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and org_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
 WithArgs(
 s.Fixtures.CreatePoolParams.ProviderName,
 s.Fixtures.CreatePoolParams.Image,
 s.Fixtures.CreatePoolParams.Flavor,
-s.Fixtures.Orgs[0].ID).
+s.Fixtures.Orgs[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"org_id"}))
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
 WillReturnRows(sqlmock.NewRows([]string{"linux"}))
 s.Fixtures.SQLMock.
 ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).
@@ -598,19 +598,19 @@ func (s *OrgTestSuite) TestCreateOrganizationPoolDBFetchPoolErr() {
 s.Fixtures.SQLMock.ExpectBegin()
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Orgs[0].ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `organizations` WHERE id = ? AND `organizations`.`deleted_at` IS NULL ORDER BY `organizations`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Orgs[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Orgs[0].ID))
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and org_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and org_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
 WithArgs(
 s.Fixtures.CreatePoolParams.ProviderName,
 s.Fixtures.CreatePoolParams.Image,
 s.Fixtures.CreatePoolParams.Flavor,
-s.Fixtures.Orgs[0].ID).
+s.Fixtures.Orgs[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"org_id"}))
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
 WillReturnRows(sqlmock.NewRows([]string{"linux"}))
 s.Fixtures.SQLMock.
 ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).
@@ -629,7 +629,7 @@ func (s *OrgTestSuite) TestCreateOrganizationPoolDBFetchPoolErr() {
 WillReturnResult(sqlmock.NewResult(1, 1))
 s.Fixtures.SQLMock.ExpectCommit()
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
 WillReturnRows(sqlmock.NewRows([]string{"id"}))
 entity, err := s.Fixtures.Orgs[0].GetEntity()


@@ -171,8 +171,8 @@ func (s *PoolsTestSuite) TestDeletePoolByIDInvalidPoolID() {
 func (s *PoolsTestSuite) TestDeletePoolByIDDBRemoveErr() {
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1 ")).
-WithArgs(s.Fixtures.Pools[0].ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Pools[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Pools[0].ID))
 s.Fixtures.SQLMock.ExpectBegin()
 s.Fixtures.SQLMock.


@@ -265,12 +265,12 @@ func (s *RepoTestSuite) TestGetRepositoryNotFound() {
 func (s *RepoTestSuite) TestGetRepositoryDBDecryptingErr() {
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE (name = ? COLLATE NOCASE and owner = ? COLLATE NOCASE) AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Repos[0].Name, s.Fixtures.Repos[0].Owner).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE (name = ? COLLATE NOCASE and owner = ? COLLATE NOCASE) AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Repos[0].Name, s.Fixtures.Repos[0].Owner, 1).
 WillReturnRows(sqlmock.NewRows([]string{"name", "owner"}).AddRow(s.Fixtures.Repos[0].Name, s.Fixtures.Repos[0].Owner))
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE (name = ? COLLATE NOCASE and owner = ? COLLATE NOCASE) AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id`,`repositories`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Repos[0].Name, s.Fixtures.Repos[0].Owner).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE (name = ? COLLATE NOCASE and owner = ? COLLATE NOCASE) AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id`,`repositories`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Repos[0].Name, s.Fixtures.Repos[0].Owner, 1).
 WillReturnRows(sqlmock.NewRows([]string{"name", "owner"}).AddRow(s.Fixtures.Repos[0].Name, s.Fixtures.Repos[0].Owner))
 _, err := s.StoreSQLMocked.GetRepository(context.Background(), s.Fixtures.Repos[0].Owner, s.Fixtures.Repos[0].Name)
@@ -331,8 +331,8 @@ func (s *RepoTestSuite) TestDeleteRepositoryInvalidRepoID() {
 func (s *RepoTestSuite) TestDeleteRepositoryDBRemoveErr() {
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Repos[0].ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Repos[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
 s.Fixtures.SQLMock.ExpectBegin()
 s.Fixtures.SQLMock.
@@ -367,8 +367,8 @@ func (s *RepoTestSuite) TestUpdateRepositoryDBEncryptErr() {
 s.StoreSQLMocked.cfg.Passphrase = wrongPassphrase
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Repos[0].ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Repos[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
 _, err := s.StoreSQLMocked.UpdateRepository(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.UpdateRepoParams)
@@ -379,8 +379,8 @@ func (s *RepoTestSuite) TestUpdateRepositoryDBEncryptErr() {
 func (s *RepoTestSuite) TestUpdateRepositoryDBSaveErr() {
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Repos[0].ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Repos[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
 s.Fixtures.SQLMock.ExpectBegin()
 s.Fixtures.SQLMock.
@@ -400,8 +400,8 @@ func (s *RepoTestSuite) TestUpdateRepositoryDBDecryptingErr() {
 s.Fixtures.UpdateRepoParams.WebhookSecret = webhookSecret
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Repos[0].ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Repos[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
 _, err := s.StoreSQLMocked.UpdateRepository(context.Background(), s.Fixtures.Repos[0].ID, s.Fixtures.UpdateRepoParams)
@@ -427,8 +427,8 @@ func (s *RepoTestSuite) TestGetRepositoryByIDInvalidRepoID() {
 func (s *RepoTestSuite) TestGetRepositoryByIDDBDecryptingErr() {
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Repos[0].ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Repos[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
 s.Fixtures.SQLMock.
 ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE `pools`.`repo_id` = ? AND `pools`.`deleted_at` IS NULL")).
@@ -483,11 +483,11 @@ func (s *RepoTestSuite) TestCreateRepositoryPoolInvalidRepoID() {
 func (s *RepoTestSuite) TestCreateRepositoryPoolDBCreateErr() {
 s.Fixtures.SQLMock.ExpectBegin()
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Repos[0].ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Repos[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and repo_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and repo_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
 WillReturnError(fmt.Errorf("mocked creating pool error"))
 entity, err := s.Fixtures.Repos[0].GetEntity()
@@ -502,16 +502,16 @@ func (s *RepoTestSuite) TestCreateRepositoryPoolDBCreateErr() {
 func (s *RepoTestSuite) TestCreateRepositoryPoolDBPoolAlreadyExistErr() {
 s.Fixtures.SQLMock.ExpectBegin()
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Repos[0].ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Repos[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and repo_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and repo_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
 WithArgs(
 s.Fixtures.CreatePoolParams.ProviderName,
 s.Fixtures.CreatePoolParams.Image,
 s.Fixtures.CreatePoolParams.Flavor,
-s.Fixtures.Repos[0].ID).
+s.Fixtures.Repos[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"repo_id", "provider_name", "image", "flavor"}).
 AddRow(
 s.Fixtures.Repos[0].ID,
@@ -532,19 +532,19 @@ func (s *RepoTestSuite) TestCreateRepositoryPoolDBPoolAlreadyExistErr() {
 func (s *RepoTestSuite) TestCreateRepositoryPoolDBFetchTagErr() {
 s.Fixtures.SQLMock.ExpectBegin()
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Repos[0].ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Repos[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and repo_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and repo_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
 WithArgs(
 s.Fixtures.CreatePoolParams.ProviderName,
 s.Fixtures.CreatePoolParams.Image,
 s.Fixtures.CreatePoolParams.Flavor,
-s.Fixtures.Repos[0].ID).
+s.Fixtures.Repos[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"repo_id"}))
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
 WillReturnError(fmt.Errorf("mocked fetching tag error"))
 entity, err := s.Fixtures.Repos[0].GetEntity()
@@ -562,19 +562,19 @@ func (s *RepoTestSuite) TestCreateRepositoryPoolDBAddingPoolErr() {
 s.Fixtures.SQLMock.ExpectBegin()
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Repos[0].ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Repos[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and repo_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and repo_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
 WithArgs(
 s.Fixtures.CreatePoolParams.ProviderName,
 s.Fixtures.CreatePoolParams.Image,
 s.Fixtures.CreatePoolParams.Flavor,
-s.Fixtures.Repos[0].ID).
+s.Fixtures.Repos[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"repo_id"}))
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
 WillReturnRows(sqlmock.NewRows([]string{"linux"}))
 s.Fixtures.SQLMock.
 ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).
@@ -599,19 +599,19 @@ func (s *RepoTestSuite) TestCreateRepositoryPoolDBSaveTagErr() {
 s.Fixtures.SQLMock.ExpectBegin()
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Repos[0].ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Repos[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and repo_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and repo_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
 WithArgs(
 s.Fixtures.CreatePoolParams.ProviderName,
 s.Fixtures.CreatePoolParams.Image,
 s.Fixtures.CreatePoolParams.Flavor,
-s.Fixtures.Repos[0].ID).
+s.Fixtures.Repos[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"repo_id"}))
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
 WillReturnRows(sqlmock.NewRows([]string{"linux"}))
 s.Fixtures.SQLMock.
 ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).
@@ -638,19 +638,19 @@ func (s *RepoTestSuite) TestCreateRepositoryPoolDBFetchPoolErr() {
 s.Fixtures.SQLMock.ExpectBegin()
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Repos[0].ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `repositories` WHERE id = ? AND `repositories`.`deleted_at` IS NULL ORDER BY `repositories`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Repos[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Repos[0].ID))
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and repo_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE (provider_name = ? and image = ? and flavor = ? and repo_id = ?) AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
 WithArgs(
 s.Fixtures.CreatePoolParams.ProviderName,
 s.Fixtures.CreatePoolParams.Image,
 s.Fixtures.CreatePoolParams.Flavor,
-s.Fixtures.Repos[0].ID).
+s.Fixtures.Repos[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"repo_id"}))
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT 1")).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `tags` WHERE name = ? AND `tags`.`deleted_at` IS NULL ORDER BY `tags`.`id` LIMIT ?")).
 WillReturnRows(sqlmock.NewRows([]string{"linux"}))
 s.Fixtures.SQLMock.
 ExpectExec(regexp.QuoteMeta("INSERT INTO `tags`")).
@@ -669,7 +669,7 @@ func (s *RepoTestSuite) TestCreateRepositoryPoolDBFetchPoolErr() {
 WillReturnResult(sqlmock.NewResult(1, 1))
 s.Fixtures.SQLMock.ExpectCommit()
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT 1")).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `pools` WHERE id = ? AND `pools`.`deleted_at` IS NULL ORDER BY `pools`.`id` LIMIT ?")).
 WillReturnRows(sqlmock.NewRows([]string{"id"}))
 entity, err := s.Fixtures.Repos[0].GetEntity()


@@ -168,12 +168,12 @@ func (s *UserTestSuite) TestCreateUserEmailAlreadyExist() {
 func (s *UserTestSuite) TestCreateUserDBCreateErr() {
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `users` WHERE username = ? AND `users`.`deleted_at` IS NULL ORDER BY `users`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.NewUserParams.Username).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `users` WHERE username = ? AND `users`.`deleted_at` IS NULL ORDER BY `users`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.NewUserParams.Username, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}))
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `users` WHERE email = ? AND `users`.`deleted_at` IS NULL ORDER BY `users`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.NewUserParams.Email).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `users` WHERE email = ? AND `users`.`deleted_at` IS NULL ORDER BY `users`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.NewUserParams.Email, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}))
 s.Fixtures.SQLMock.ExpectBegin()
 s.Fixtures.SQLMock.
@@ -258,8 +258,8 @@ func (s *UserTestSuite) TestUpdateUserNotFound() {
 func (s *UserTestSuite) TestUpdateUserDBSaveErr() {
 s.Fixtures.SQLMock.
-ExpectQuery(regexp.QuoteMeta("SELECT * FROM `users` WHERE username = ? AND `users`.`deleted_at` IS NULL ORDER BY `users`.`id` LIMIT 1")).
-WithArgs(s.Fixtures.Users[0].ID).
+ExpectQuery(regexp.QuoteMeta("SELECT * FROM `users` WHERE username = ? AND `users`.`deleted_at` IS NULL ORDER BY `users`.`id` LIMIT ?")).
+WithArgs(s.Fixtures.Users[0].ID, 1).
 WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(s.Fixtures.Users[0].ID))
 s.Fixtures.SQLMock.ExpectBegin()
 s.Fixtures.SQLMock.

go.mod

@@ -4,56 +4,57 @@ go 1.21
 require (
 github.com/BurntSushi/toml v1.3.2
-github.com/bradleyfalzon/ghinstallation/v2 v2.9.0
+github.com/bradleyfalzon/ghinstallation/v2 v2.10.0
 github.com/cloudbase/garm-provider-common v0.1.1
 github.com/felixge/httpsnoop v1.0.4
-github.com/go-openapi/errors v0.21.0
-github.com/go-openapi/runtime v0.26.2
-github.com/go-openapi/strfmt v0.21.10
-github.com/go-openapi/swag v0.22.5
-github.com/golang-jwt/jwt/v5 v5.2.0
+github.com/go-openapi/errors v0.22.0
+github.com/go-openapi/runtime v0.28.0
+github.com/go-openapi/strfmt v0.23.0
+github.com/go-openapi/swag v0.23.0
+github.com/golang-jwt/jwt/v5 v5.2.1
 github.com/google/go-github/v57 v57.0.0
-github.com/google/uuid v1.5.0
+github.com/google/uuid v1.6.0
 github.com/gorilla/handlers v1.5.2
 github.com/gorilla/mux v1.8.1
 github.com/gorilla/websocket v1.5.1
-github.com/jedib0t/go-pretty/v6 v6.4.9
+github.com/jedib0t/go-pretty/v6 v6.5.8
 github.com/juju/clock v1.0.3
 github.com/juju/retry v1.0.0
 github.com/manifoldco/promptui v0.9.0
 github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354
 github.com/pkg/errors v0.9.1
-github.com/prometheus/client_golang v1.17.0
+github.com/prometheus/client_golang v1.19.0
 github.com/spf13/cobra v1.8.0
-github.com/stretchr/testify v1.8.4
-golang.org/x/crypto v0.16.0
-golang.org/x/oauth2 v0.15.0
-golang.org/x/sync v0.5.0
+github.com/stretchr/testify v1.9.0
+golang.org/x/crypto v0.22.0
+golang.org/x/oauth2 v0.19.0
+golang.org/x/sync v0.7.0
 gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0
 gopkg.in/natefinch/lumberjack.v2 v2.2.1
 gorm.io/datatypes v1.2.0
-gorm.io/driver/mysql v1.5.2
-gorm.io/driver/sqlite v1.5.4
-gorm.io/gorm v1.25.5
+gorm.io/driver/mysql v1.5.6
+gorm.io/driver/sqlite v1.5.5
+gorm.io/gorm v1.25.9
 )

 require (
+filippo.io/edwards25519 v1.1.0 // indirect
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
 github.com/beorn7/perks v1.0.1 // indirect
-github.com/cespare/xxhash/v2 v2.2.0 // indirect
+github.com/cespare/xxhash/v2 v2.3.0 // indirect
 github.com/chzyer/readline v1.5.1 // indirect
 github.com/davecgh/go-spew v1.1.1 // indirect
-github.com/go-logr/logr v1.3.0 // indirect
+github.com/go-logr/logr v1.4.1 // indirect
 github.com/go-logr/stdr v1.2.2 // indirect
-github.com/go-openapi/analysis v0.21.5 // indirect
-github.com/go-openapi/jsonpointer v0.20.1 // indirect
-github.com/go-openapi/jsonreference v0.20.3 // indirect
-github.com/go-openapi/loads v0.21.3 // indirect
-github.com/go-openapi/spec v0.20.12 // indirect
-github.com/go-openapi/validate v0.22.4 // indirect
-github.com/go-sql-driver/mysql v1.7.1 // indirect
+github.com/go-openapi/analysis v0.23.0 // indirect
+github.com/go-openapi/jsonpointer v0.21.0 // indirect
+github.com/go-openapi/jsonreference v0.21.0 // indirect
+github.com/go-openapi/loads v0.22.0 // indirect
+github.com/go-openapi/spec v0.21.0 // indirect
+github.com/go-openapi/validate v0.24.0 // indirect
+github.com/go-sql-driver/mysql v1.8.1 // indirect
 github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
-github.com/golang/protobuf v1.5.3 // indirect
+github.com/google/go-github/v60 v60.0.0 // indirect
 github.com/google/go-querystring v1.1.0 // indirect
 github.com/inconshreveable/mousetrap v1.1.0 // indirect
 github.com/jinzhu/inflection v1.0.0 // indirect
@@ -65,27 +66,25 @@ require (
 github.com/mailru/easyjson v0.7.7 // indirect
 github.com/mattn/go-isatty v0.0.20 // indirect
 github.com/mattn/go-runewidth v0.0.15 // indirect
-github.com/mattn/go-sqlite3 v1.14.19 // indirect
-github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
+github.com/mattn/go-sqlite3 v1.14.22 // indirect
 github.com/minio/sio v0.3.1 // indirect
 github.com/mitchellh/mapstructure v1.5.0 // indirect
 github.com/oklog/ulid v1.3.1 // indirect
 github.com/opentracing/opentracing-go v1.2.0 // indirect
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
-github.com/prometheus/client_model v0.5.0 // indirect
-github.com/prometheus/common v0.45.0 // indirect
-github.com/prometheus/procfs v0.12.0 // indirect
-github.com/rivo/uniseg v0.4.4 // indirect
+github.com/prometheus/client_model v0.6.1 // indirect
+github.com/prometheus/common v0.53.0 // indirect
+github.com/prometheus/procfs v0.13.0 // indirect
+github.com/rivo/uniseg v0.4.7 // indirect
 github.com/spf13/pflag v1.0.5 // indirect
-github.com/stretchr/objx v0.5.1 // indirect
+github.com/stretchr/objx v0.5.2 // indirect
 github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569 // indirect
-go.mongodb.org/mongo-driver v1.13.1 // indirect
-go.opentelemetry.io/otel v1.21.0 // indirect
-go.opentelemetry.io/otel/metric v1.21.0 // indirect
-go.opentelemetry.io/otel/trace v1.21.0 // indirect
-golang.org/x/net v0.19.0 // indirect
-golang.org/x/sys v0.15.0 // indirect
-google.golang.org/appengine v1.6.8 // indirect
-google.golang.org/protobuf v1.31.0 // indirect
+go.mongodb.org/mongo-driver v1.15.0 // indirect
+go.opentelemetry.io/otel v1.25.0 // indirect
+go.opentelemetry.io/otel/metric v1.25.0 // indirect
+go.opentelemetry.io/otel/trace v1.25.0 // indirect
+golang.org/x/net v0.24.0 // indirect
+golang.org/x/sys v0.19.0 // indirect
+google.golang.org/protobuf v1.33.0 // indirect
 gopkg.in/yaml.v3 v3.0.1 // indirect
 )

go.sum

@ -1,13 +1,15 @@
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bradleyfalzon/ghinstallation/v2 v2.9.0 h1:HmxIYqnxubRYcYGRc5v3wUekmo5Wv2uX3gukmWJ0AFk=
github.com/bradleyfalzon/ghinstallation/v2 v2.9.0/go.mod h1:wmkTDJf8CmVypxE8ijIStFnKoTa6solK5QfdmJrP9KI=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/bradleyfalzon/ghinstallation/v2 v2.10.0 h1:XWuWBRFEpqVrHepQob9yPS3Xg4K3Wr9QCx4fu8HbUNg=
github.com/bradleyfalzon/ghinstallation/v2 v2.10.0/go.mod h1:qoGA4DxWPaYTgVCrmEspVSjlTu4WYAiSxMIhorMRXXc=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM=
github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
@@ -26,56 +28,52 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/analysis v0.21.5 h1:3tHfEBh6Ia8eKc4M7khOGjPOAlWKJ10d877Cr9teujI=
github.com/go-openapi/analysis v0.21.5/go.mod h1:25YcZosX9Lwz2wBsrFrrsL8bmjjXdlyP6zsr2AMy29M=
github.com/go-openapi/errors v0.21.0 h1:FhChC/duCnfoLj1gZ0BgaBmzhJC2SL/sJr8a2vAobSY=
github.com/go-openapi/errors v0.21.0/go.mod h1:jxNTMUxRCKj65yb/okJGEtahVd7uvWnuWfj53bse4ho=
github.com/go-openapi/jsonpointer v0.20.1 h1:MkK4VEIEZMj4wT9PmjaUmGflVBr9nvud4Q4UVFbDoBE=
github.com/go-openapi/jsonpointer v0.20.1/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs=
github.com/go-openapi/jsonreference v0.20.3 h1:EjGcjTW8pD1mRis6+w/gmoBdqv5+RbE9B85D1NgDOVQ=
github.com/go-openapi/jsonreference v0.20.3/go.mod h1:FviDZ46i9ivh810gqzFLl5NttD5q3tSlMLqLr6okedM=
github.com/go-openapi/loads v0.21.3 h1:8sSH2FIm/SnbDUGv572md4YqVMFne/a9Eubvcd3anew=
github.com/go-openapi/loads v0.21.3/go.mod h1:Y3aMR24iHbKHppOj91nQ/SHc0cuPbAr4ndY4a02xydc=
github.com/go-openapi/runtime v0.26.2 h1:elWyB9MacRzvIVgAZCBJmqTi7hBzU0hlKD4IvfX0Zl0=
github.com/go-openapi/runtime v0.26.2/go.mod h1:O034jyRZ557uJKzngbMDJXkcKJVzXJiymdSfgejrcRw=
github.com/go-openapi/spec v0.20.12 h1:cgSLbrsmziAP2iais+Vz7kSazwZ8rsUZd6TUzdDgkVI=
github.com/go-openapi/spec v0.20.12/go.mod h1:iSCgnBcwbMW9SfzJb8iYynXvcY6C/QFrI7otzF7xGM4=
github.com/go-openapi/strfmt v0.21.10 h1:JIsly3KXZB/Qf4UzvzJpg4OELH/0ASDQsyk//TTBDDk=
github.com/go-openapi/strfmt v0.21.10/go.mod h1:vNDMwbilnl7xKiO/Ve/8H8Bb2JIInBnH+lqiw6QWgis=
github.com/go-openapi/swag v0.22.5 h1:fVS63IE3M0lsuWRzuom3RLwUMVI2peDH01s6M70ugys=
github.com/go-openapi/swag v0.22.5/go.mod h1:Gl91UqO+btAM0plGGxHqJcQZ1ZTy6jbmridBTsDy8A0=
github.com/go-openapi/validate v0.22.4 h1:5v3jmMyIPKTR8Lv9syBAIRxG6lY0RqeBPB1LKEijzk8=
github.com/go-openapi/validate v0.22.4/go.mod h1:qm6O8ZIcPVdSY5219468Jv7kBdGvkiZLPOmqnqTUZ2A=
github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU=
github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo=
github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w=
github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco=
github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs=
github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ=
github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc=
github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY=
github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk=
github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c=
github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4=
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58=
github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ=
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw=
github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA=
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A=
github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-github/v57 v57.0.0 h1:L+Y3UPTY8ALM8x+TV0lg+IEBI+upibemtBD8Q9u7zHs=
github.com/google/go-github/v57 v57.0.0/go.mod h1:s0omdnye0hvK/ecLvpsGfJMiRt85PimQh4oygmLIxHw=
github.com/google/go-github/v60 v60.0.0 h1:oLG98PsLauFvvu4D/YPxq374jhSxFYdzQGNCyONLfn8=
github.com/google/go-github/v60 v60.0.0/go.mod h1:ByhX2dP9XT9o/ll2yXAu2VD8l5eNVg8hD4Cr0S/LmQk=
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE=
github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
@@ -90,8 +88,8 @@ github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.3.0 h1:/NQi8KHMpKWHInxXesC8yD4DhkXPrVhmnwYkjp9AmBA=
github.com/jackc/pgx/v5 v5.3.0/go.mod h1:t3JDKnCBlYIc0ewLF0Q7B8MXmoIaBOZj/ic7iHozM/8=
github.com/jedib0t/go-pretty/v6 v6.4.9 h1:vZ6bjGg2eBSrJn365qlxGcaWu09Id+LHtrfDWlB2Usc=
github.com/jedib0t/go-pretty/v6 v6.4.9/go.mod h1:Ndk3ase2CkQbXLLNf5QDHoYb6J9WtVfmHZu9n8rk2xs=
github.com/jedib0t/go-pretty/v6 v6.5.8 h1:8BCzJdSvUbaDuRba4YVh+SKMGcAAKdkcF3SVFbrHAtQ=
github.com/jedib0t/go-pretty/v6 v6.5.8/go.mod h1:zbn98qrYlh95FIhwwsbIip0LYpwSG8SUOScs+v9/t0E=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
@@ -111,7 +109,6 @@ github.com/juju/testing v1.0.2 h1:OR90RqCd9CJONxXamZAjLknpZdtqDyxqW8IwCbgw3i4=
github.com/juju/testing v1.0.2/go.mod h1:h3Vd2rzB57KrdsBEy6R7bmSKPzP76BnNavt7i8PerwQ=
github.com/juju/utils/v3 v3.0.0 h1:Gg3n63mGPbBuoXCo+EPJuMi44hGZfloI8nlCIebHu2Q=
github.com/juju/utils/v3 v3.0.0/go.mod h1:8csUcj1VRkfjNIRzBFWzLFCMLwLqsRWvkmhfVAUwbC4=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -125,20 +122,16 @@ github.com/mattn/go-colorable v0.0.6/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO
github.com/mattn/go-isatty v0.0.0-20160806122752-66b8e73f3f5c/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.14.19 h1:fhGleo2h1p8tVChob4I9HpmVFIAkKGpiukdrgQbWfGI=
github.com/mattn/go-sqlite3 v1.14.19/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/microsoft/go-mssqldb v0.17.0 h1:Fto83dMZPnYv1Zwx5vHHxpNraeEaUlQ/hhHLgZiaenE=
github.com/microsoft/go-mssqldb v0.17.0/go.mod h1:OkoNGhGEs8EZqchVTtochlXruEhEOaO4S0d2sB5aeGQ=
github.com/minio/sio v0.3.1 h1:d59r5RTHb1OsQaSl1EaTWurzMMDRLA5fgNmjzD4eVu4=
github.com/minio/sio v0.3.1/go.mod h1:S0ovgVgc+sTlQyhiXA1ppBLv7REM7TYi5yyq2qL/Y6o=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6FxaNu/BnU2OAaLF86eTVhP2hjTB6iMvItA=
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
@@ -147,21 +140,20 @@ github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.6.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM=
github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE=
github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U=
github.com/prometheus/procfs v0.13.0 h1:GqzLlQyfsPbaEHaQkO7tbDlriv/4o5Hudv6OXHGKX7o=
github.com/prometheus/procfs v0.13.0/go.mod h1:cd4PFCR54QLnGKPaKGA6l+cfuNXtht43ZKY6tow0Y1g=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -170,86 +162,42 @@ github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyh
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.1 h1:4VhoImhV/Bm0ToFkXFi8hXNXwpDRZ/ynw3amt82mzq0=
github.com/stretchr/objx v0.5.1/go.mod h1:/iHQpkQwBD6DLUmQ4pE+s1TXdob1mORJ4/UFdrifcy0=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.4/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569 h1:xzABM9let0HLLqFypcxvLmlvEciCHL7+Lv+4vwZqecI=
github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569/go.mod h1:2Ly+NIftZN4de9zRmENdYbvPQeaVIYKWpLFStLFEBgI=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/vk=
go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo=
go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
go.opentelemetry.io/otel/sdk v1.17.0 h1:FLN2X66Ke/k5Sg3V623Q7h7nt3cHXaW1FOvKKrW0IpE=
go.opentelemetry.io/otel/sdk v1.17.0/go.mod h1:U87sE0f5vQB7hwUoW98pW5Rz4ZDuCFBZFNUBlSgmDFQ=
go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY=
golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ=
golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
go.mongodb.org/mongo-driver v1.15.0 h1:rJCKC8eEliewXjZGf0ddURtl7tTVy1TK3bfl0gkUSLc=
go.mongodb.org/mongo-driver v1.15.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
go.opentelemetry.io/otel v1.25.0 h1:gldB5FfhRl7OJQbUHt/8s0a7cE8fbsPAtdpRaApKy4k=
go.opentelemetry.io/otel v1.25.0/go.mod h1:Wa2ds5NOXEMkCmUou1WA7ZBfLTHWIsp034OVD7AO+Vg=
go.opentelemetry.io/otel/metric v1.25.0 h1:LUKbS7ArpFL/I2jJHdJcqMGxkRdxpPHE0VU/D4NuEwA=
go.opentelemetry.io/otel/metric v1.25.0/go.mod h1:rkDLUSd2lC5lq2dFNrX9LGAbINP5B7WBkC78RXCpH5s=
go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw=
go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg=
go.opentelemetry.io/otel/trace v1.25.0 h1:tqukZGLwQYRIFtSQM2u2+yfMVTgGVeqRLPUYx1Dq6RM=
go.opentelemetry.io/otel/trace v1.25.0/go.mod h1:hCCs70XM/ljO+BeQkyFnbK28SBIJ/Emuha+ccrCRT7I=
golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg=
golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0 h1:FVCohIoYO7IJoDDVpV2pdq7SgrMH6wHnuTyrdrxJNoY=
gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0/go.mod h1:OdE7CF6DbADk7lN8LIKRzRJTTZXIjtWgA5THM5lhBAw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -260,19 +208,18 @@ gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/datatypes v1.2.0 h1:5YT+eokWdIxhJgWHdrb2zYUimyk0+TaFth+7a0ybzco=
gorm.io/datatypes v1.2.0/go.mod h1:o1dh0ZvjIjhH/bngTpypG6lVRJ5chTBxE09FH/71k04=
gorm.io/driver/mysql v1.5.2 h1:QC2HRskSE75wBuOxe0+iCkyJZ+RqpudsQtqkp+IMuXs=
gorm.io/driver/mysql v1.5.2/go.mod h1:pQLhh1Ut/WUAySdTHwBpBv6+JKcj+ua4ZFx1QQTBzb8=
gorm.io/driver/mysql v1.5.6 h1:Ld4mkIickM+EliaQZQx3uOJDJHtrd70MxAUqWqlx3Y8=
gorm.io/driver/mysql v1.5.6/go.mod h1:sEtPWMiqiN1N1cMXoXmBbd8C6/l+TESwriotuRRpkDM=
gorm.io/driver/postgres v1.5.0 h1:u2FXTy14l45qc3UeCJ7QaAXZmZfDDv0YrthvmRq1l0U=
gorm.io/driver/postgres v1.5.0/go.mod h1:FUZXzO+5Uqg5zzwzv4KK49R8lvGIyscBOqYrtI1Ce9A=
gorm.io/driver/sqlite v1.5.4 h1:IqXwXi8M/ZlPzH/947tn5uik3aYQslP9BVveoax0nV0=
gorm.io/driver/sqlite v1.5.4/go.mod h1:qxAuCol+2r6PannQDpOP1FP6ag3mKi4esLnB/jHed+4=
gorm.io/driver/sqlite v1.5.5 h1:7MDMtUZhV065SilG62E0MquljeArQZNfJnjd9i9gx3E=
gorm.io/driver/sqlite v1.5.5/go.mod h1:6NgQ7sQWAIFsPrJJl1lSNSu2TABh0ZZ/zm5fosATavE=
gorm.io/driver/sqlserver v1.4.1 h1:t4r4r6Jam5E6ejqP7N82qAJIJAht27EGT41HyPfXRw0=
gorm.io/driver/sqlserver v1.4.1/go.mod h1:DJ4P+MeZbc5rvY58PnmN1Lnyvb5gw5NPzGshHDnJLig=
gorm.io/gorm v1.25.2-0.20230530020048-26663ab9bf55/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
gorm.io/gorm v1.25.5 h1:zR9lOiiYf09VNh5Q1gphfyia1JpiClIWG9hQaxB/mls=
gorm.io/gorm v1.25.5/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
gorm.io/gorm v1.25.9 h1:wct0gxZIELDk8+ZqF/MVnHLkA1rvYlBWUMv2EdsK1g8=
gorm.io/gorm v1.25.9/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=


@@ -1,4 +1,4 @@
Copyright 2010 The Go Authors. All rights reserved.
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -25,4 +25,3 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/filippo.io/edwards25519/README.md generated vendored Normal file

@@ -0,0 +1,14 @@
# filippo.io/edwards25519
```
import "filippo.io/edwards25519"
```
This library implements the edwards25519 elliptic curve, exposing the necessary APIs to build a wide array of higher-level primitives.
Read the docs at [pkg.go.dev/filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519).
The code is originally derived from Adam Langley's internal implementation in the Go standard library, and includes George Tankersley's [performance improvements](https://golang.org/cl/71950). It was then further developed by Henry de Valence for use in ristretto255, and was finally [merged back into the Go standard library](https://golang.org/cl/276272) as of Go 1.17. It now tracks the upstream codebase and extends it with additional functionality.
Most users don't need this package, and should instead use `crypto/ed25519` for signatures, `golang.org/x/crypto/curve25519` for Diffie-Hellman, or `github.com/gtank/ristretto255` for prime order group logic. However, for anyone currently using a fork of `crypto/internal/edwards25519`/`crypto/ed25519/internal/edwards25519` or `github.com/agl/edwards25519`, this package should be a safer, faster, and more powerful alternative.
Since this package is meant to curb proliferation of edwards25519 implementations in the Go ecosystem, it welcomes requests for new APIs or reviewable performance improvements.
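As a quick orientation for reviewers of this vendored dependency, here is a minimal sketch (editorial, not part of the upstream README) that exercises only APIs defined in the files below: point construction, addition, the canonical 32-byte encoding round-trip, and constant-time comparison.

```go
package main

import (
	"fmt"

	"filippo.io/edwards25519"
)

func main() {
	// B + B, starting from the canonical generator.
	b := edwards25519.NewGeneratorPoint()
	sum := new(edwards25519.Point).Add(b, b)

	// Round-trip through the canonical RFC 8032 encoding.
	q, err := new(edwards25519.Point).SetBytes(sum.Bytes())
	if err != nil {
		panic(err)
	}
	fmt.Println(sum.Equal(q) == 1) // true
}
```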

vendor/filippo.io/edwards25519/doc.go generated vendored Normal file

@@ -0,0 +1,20 @@
// Copyright (c) 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package edwards25519 implements group logic for the twisted Edwards curve
//
// -x^2 + y^2 = 1 + -(121665/121666)*x^2*y^2
//
// This is better known as the Edwards curve equivalent to Curve25519, and is
// the curve used by the Ed25519 signature scheme.
//
// Most users don't need this package, and should instead use crypto/ed25519 for
// signatures, golang.org/x/crypto/curve25519 for Diffie-Hellman, or
// github.com/gtank/ristretto255 for prime order group logic.
//
// However, developers who do need to interact with low-level edwards25519
// operations can use this package, which is an extended version of
// crypto/internal/edwards25519 from the standard library repackaged as
// an importable module.
package edwards25519

vendor/filippo.io/edwards25519/edwards25519.go generated vendored Normal file

@@ -0,0 +1,427 @@
// Copyright (c) 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package edwards25519
import (
"errors"
"filippo.io/edwards25519/field"
)
// Point types.
type projP1xP1 struct {
X, Y, Z, T field.Element
}
type projP2 struct {
X, Y, Z field.Element
}
// Point represents a point on the edwards25519 curve.
//
// This type works similarly to math/big.Int, and all arguments and receivers
// are allowed to alias.
//
// The zero value is NOT valid, and it may be used only as a receiver.
type Point struct {
// Make the type not comparable (i.e. not usable with == or as a map key), as
// equivalent points can be represented by different Go values.
_ incomparable
// The point is internally represented in extended coordinates (X, Y, Z, T)
// where x = X/Z, y = Y/Z, and xy = T/Z per https://eprint.iacr.org/2008/522.
x, y, z, t field.Element
}
type incomparable [0]func()
func checkInitialized(points ...*Point) {
for _, p := range points {
if p.x == (field.Element{}) && p.y == (field.Element{}) {
panic("edwards25519: use of uninitialized Point")
}
}
}
type projCached struct {
YplusX, YminusX, Z, T2d field.Element
}
type affineCached struct {
YplusX, YminusX, T2d field.Element
}
// Constructors.
func (v *projP2) Zero() *projP2 {
v.X.Zero()
v.Y.One()
v.Z.One()
return v
}
// identity is the neutral element of the group, the point (0, 1).
var identity, _ = new(Point).SetBytes([]byte{
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})
// NewIdentityPoint returns a new Point set to the identity.
func NewIdentityPoint() *Point {
return new(Point).Set(identity)
}
// generator is the canonical curve basepoint. See TestGenerator for the
// correspondence of this encoding with the values in RFC 8032.
var generator, _ = new(Point).SetBytes([]byte{
0x58, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66})
// NewGeneratorPoint returns a new Point set to the canonical generator.
func NewGeneratorPoint() *Point {
return new(Point).Set(generator)
}
func (v *projCached) Zero() *projCached {
v.YplusX.One()
v.YminusX.One()
v.Z.One()
v.T2d.Zero()
return v
}
func (v *affineCached) Zero() *affineCached {
v.YplusX.One()
v.YminusX.One()
v.T2d.Zero()
return v
}
// Assignments.
// Set sets v = u, and returns v.
func (v *Point) Set(u *Point) *Point {
*v = *u
return v
}
// Encoding.
// Bytes returns the canonical 32-byte encoding of v, according to RFC 8032,
// Section 5.1.2.
func (v *Point) Bytes() []byte {
// This function is outlined to make the allocations inline in the caller
// rather than happen on the heap.
var buf [32]byte
return v.bytes(&buf)
}
func (v *Point) bytes(buf *[32]byte) []byte {
checkInitialized(v)
var zInv, x, y field.Element
zInv.Invert(&v.z) // zInv = 1 / Z
x.Multiply(&v.x, &zInv) // x = X / Z
y.Multiply(&v.y, &zInv) // y = Y / Z
out := copyFieldElement(buf, &y)
out[31] |= byte(x.IsNegative() << 7)
return out
}
var feOne = new(field.Element).One()
// SetBytes sets v = x, where x is a 32-byte encoding of v. If x does not
// represent a valid point on the curve, SetBytes returns nil and an error and
// the receiver is unchanged. Otherwise, SetBytes returns v.
//
// Note that SetBytes accepts all non-canonical encodings of valid points.
// That is, it follows decoding rules that match most implementations in
// the ecosystem rather than RFC 8032.
func (v *Point) SetBytes(x []byte) (*Point, error) {
// Specifically, the non-canonical encodings that are accepted are
// 1) the ones where the field element is not reduced (see the
// (*field.Element).SetBytes docs) and
// 2) the ones where the x-coordinate is zero and the sign bit is set.
//
// Read more at https://hdevalence.ca/blog/2020-10-04-its-25519am,
// specifically the "Canonical A, R" section.
y, err := new(field.Element).SetBytes(x)
if err != nil {
return nil, errors.New("edwards25519: invalid point encoding length")
}
// -x² + y² = 1 + dx²y²
// x² + dx²y² = x²(dy² + 1) = y² - 1
// x² = (y² - 1) / (dy² + 1)
// u = y² - 1
y2 := new(field.Element).Square(y)
u := new(field.Element).Subtract(y2, feOne)
// v = dy² + 1
vv := new(field.Element).Multiply(y2, d)
vv = vv.Add(vv, feOne)
// x = +√(u/v)
xx, wasSquare := new(field.Element).SqrtRatio(u, vv)
if wasSquare == 0 {
return nil, errors.New("edwards25519: invalid point encoding")
}
// Select the negative square root if the sign bit is set.
xxNeg := new(field.Element).Negate(xx)
xx = xx.Select(xxNeg, xx, int(x[31]>>7))
v.x.Set(xx)
v.y.Set(y)
v.z.One()
v.t.Multiply(xx, y) // xy = T / Z
return v, nil
}
func copyFieldElement(buf *[32]byte, v *field.Element) []byte {
copy(buf[:], v.Bytes())
return buf[:]
}
// Conversions.
func (v *projP2) FromP1xP1(p *projP1xP1) *projP2 {
v.X.Multiply(&p.X, &p.T)
v.Y.Multiply(&p.Y, &p.Z)
v.Z.Multiply(&p.Z, &p.T)
return v
}
func (v *projP2) FromP3(p *Point) *projP2 {
v.X.Set(&p.x)
v.Y.Set(&p.y)
v.Z.Set(&p.z)
return v
}
func (v *Point) fromP1xP1(p *projP1xP1) *Point {
v.x.Multiply(&p.X, &p.T)
v.y.Multiply(&p.Y, &p.Z)
v.z.Multiply(&p.Z, &p.T)
v.t.Multiply(&p.X, &p.Y)
return v
}
func (v *Point) fromP2(p *projP2) *Point {
v.x.Multiply(&p.X, &p.Z)
v.y.Multiply(&p.Y, &p.Z)
v.z.Square(&p.Z)
v.t.Multiply(&p.X, &p.Y)
return v
}
// d is a constant in the curve equation.
var d, _ = new(field.Element).SetBytes([]byte{
0xa3, 0x78, 0x59, 0x13, 0xca, 0x4d, 0xeb, 0x75,
0xab, 0xd8, 0x41, 0x41, 0x4d, 0x0a, 0x70, 0x00,
0x98, 0xe8, 0x79, 0x77, 0x79, 0x40, 0xc7, 0x8c,
0x73, 0xfe, 0x6f, 0x2b, 0xee, 0x6c, 0x03, 0x52})
var d2 = new(field.Element).Add(d, d)
func (v *projCached) FromP3(p *Point) *projCached {
v.YplusX.Add(&p.y, &p.x)
v.YminusX.Subtract(&p.y, &p.x)
v.Z.Set(&p.z)
v.T2d.Multiply(&p.t, d2)
return v
}
func (v *affineCached) FromP3(p *Point) *affineCached {
v.YplusX.Add(&p.y, &p.x)
v.YminusX.Subtract(&p.y, &p.x)
v.T2d.Multiply(&p.t, d2)
var invZ field.Element
invZ.Invert(&p.z)
v.YplusX.Multiply(&v.YplusX, &invZ)
v.YminusX.Multiply(&v.YminusX, &invZ)
v.T2d.Multiply(&v.T2d, &invZ)
return v
}
// (Re)addition and subtraction.
// Add sets v = p + q, and returns v.
func (v *Point) Add(p, q *Point) *Point {
checkInitialized(p, q)
qCached := new(projCached).FromP3(q)
result := new(projP1xP1).Add(p, qCached)
return v.fromP1xP1(result)
}
// Subtract sets v = p - q, and returns v.
func (v *Point) Subtract(p, q *Point) *Point {
checkInitialized(p, q)
qCached := new(projCached).FromP3(q)
result := new(projP1xP1).Sub(p, qCached)
return v.fromP1xP1(result)
}
func (v *projP1xP1) Add(p *Point, q *projCached) *projP1xP1 {
var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element
YplusX.Add(&p.y, &p.x)
YminusX.Subtract(&p.y, &p.x)
PP.Multiply(&YplusX, &q.YplusX)
MM.Multiply(&YminusX, &q.YminusX)
TT2d.Multiply(&p.t, &q.T2d)
ZZ2.Multiply(&p.z, &q.Z)
ZZ2.Add(&ZZ2, &ZZ2)
v.X.Subtract(&PP, &MM)
v.Y.Add(&PP, &MM)
v.Z.Add(&ZZ2, &TT2d)
v.T.Subtract(&ZZ2, &TT2d)
return v
}
func (v *projP1xP1) Sub(p *Point, q *projCached) *projP1xP1 {
var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element
YplusX.Add(&p.y, &p.x)
YminusX.Subtract(&p.y, &p.x)
PP.Multiply(&YplusX, &q.YminusX) // flipped sign
MM.Multiply(&YminusX, &q.YplusX) // flipped sign
TT2d.Multiply(&p.t, &q.T2d)
ZZ2.Multiply(&p.z, &q.Z)
ZZ2.Add(&ZZ2, &ZZ2)
v.X.Subtract(&PP, &MM)
v.Y.Add(&PP, &MM)
v.Z.Subtract(&ZZ2, &TT2d) // flipped sign
v.T.Add(&ZZ2, &TT2d) // flipped sign
return v
}
func (v *projP1xP1) AddAffine(p *Point, q *affineCached) *projP1xP1 {
var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element
YplusX.Add(&p.y, &p.x)
YminusX.Subtract(&p.y, &p.x)
PP.Multiply(&YplusX, &q.YplusX)
MM.Multiply(&YminusX, &q.YminusX)
TT2d.Multiply(&p.t, &q.T2d)
Z2.Add(&p.z, &p.z)
v.X.Subtract(&PP, &MM)
v.Y.Add(&PP, &MM)
v.Z.Add(&Z2, &TT2d)
v.T.Subtract(&Z2, &TT2d)
return v
}
func (v *projP1xP1) SubAffine(p *Point, q *affineCached) *projP1xP1 {
var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element
YplusX.Add(&p.y, &p.x)
YminusX.Subtract(&p.y, &p.x)
PP.Multiply(&YplusX, &q.YminusX) // flipped sign
MM.Multiply(&YminusX, &q.YplusX) // flipped sign
TT2d.Multiply(&p.t, &q.T2d)
Z2.Add(&p.z, &p.z)
v.X.Subtract(&PP, &MM)
v.Y.Add(&PP, &MM)
v.Z.Subtract(&Z2, &TT2d) // flipped sign
v.T.Add(&Z2, &TT2d) // flipped sign
return v
}
// Doubling.
func (v *projP1xP1) Double(p *projP2) *projP1xP1 {
var XX, YY, ZZ2, XplusYsq field.Element
XX.Square(&p.X)
YY.Square(&p.Y)
ZZ2.Square(&p.Z)
ZZ2.Add(&ZZ2, &ZZ2)
XplusYsq.Add(&p.X, &p.Y)
XplusYsq.Square(&XplusYsq)
v.Y.Add(&YY, &XX)
v.Z.Subtract(&YY, &XX)
v.X.Subtract(&XplusYsq, &v.Y)
v.T.Subtract(&ZZ2, &v.Z)
return v
}
// Negation.
// Negate sets v = -p, and returns v.
func (v *Point) Negate(p *Point) *Point {
checkInitialized(p)
v.x.Negate(&p.x)
v.y.Set(&p.y)
v.z.Set(&p.z)
v.t.Negate(&p.t)
return v
}
// Equal returns 1 if v is equivalent to u, and 0 otherwise.
func (v *Point) Equal(u *Point) int {
checkInitialized(v, u)
var t1, t2, t3, t4 field.Element
t1.Multiply(&v.x, &u.z)
t2.Multiply(&u.x, &v.z)
t3.Multiply(&v.y, &u.z)
t4.Multiply(&u.y, &v.z)
return t1.Equal(&t2) & t3.Equal(&t4)
}
// Constant-time operations
// Select sets v to a if cond == 1 and to b if cond == 0.
func (v *projCached) Select(a, b *projCached, cond int) *projCached {
v.YplusX.Select(&a.YplusX, &b.YplusX, cond)
v.YminusX.Select(&a.YminusX, &b.YminusX, cond)
v.Z.Select(&a.Z, &b.Z, cond)
v.T2d.Select(&a.T2d, &b.T2d, cond)
return v
}
// Select sets v to a if cond == 1 and to b if cond == 0.
func (v *affineCached) Select(a, b *affineCached, cond int) *affineCached {
v.YplusX.Select(&a.YplusX, &b.YplusX, cond)
v.YminusX.Select(&a.YminusX, &b.YminusX, cond)
v.T2d.Select(&a.T2d, &b.T2d, cond)
return v
}
// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0.
func (v *projCached) CondNeg(cond int) *projCached {
v.YplusX.Swap(&v.YminusX, cond)
v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond)
return v
}
// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0.
func (v *affineCached) CondNeg(cond int) *affineCached {
v.YplusX.Swap(&v.YminusX, cond)
v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond)
return v
}
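The group operations above compose as expected; as a small illustrative check (editorial, not part of the vendored file), P + (−P) yields the identity, using only Negate, Add, Equal, and the constructors defined in this file:

```go
package main

import (
	"fmt"

	"filippo.io/edwards25519"
)

func main() {
	p := edwards25519.NewGeneratorPoint()
	neg := new(edwards25519.Point).Negate(p)
	sum := new(edwards25519.Point).Add(p, neg)
	fmt.Println(sum.Equal(edwards25519.NewIdentityPoint()) == 1) // true
}
```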

vendor/filippo.io/edwards25519/extra.go generated vendored Normal file

@@ -0,0 +1,349 @@
// Copyright (c) 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package edwards25519
// This file contains additional functionality that is not included in the
// upstream crypto/internal/edwards25519 package.
import (
"errors"
"filippo.io/edwards25519/field"
)
// ExtendedCoordinates returns v in extended coordinates (X:Y:Z:T) where
// x = X/Z, y = Y/Z, and xy = T/Z as in https://eprint.iacr.org/2008/522.
func (v *Point) ExtendedCoordinates() (X, Y, Z, T *field.Element) {
// This function is outlined to make the allocations inline in the caller
// rather than happen on the heap. Don't change the style without making
// sure it doesn't increase the inliner cost.
var e [4]field.Element
X, Y, Z, T = v.extendedCoordinates(&e)
return
}
func (v *Point) extendedCoordinates(e *[4]field.Element) (X, Y, Z, T *field.Element) {
checkInitialized(v)
X = e[0].Set(&v.x)
Y = e[1].Set(&v.y)
Z = e[2].Set(&v.z)
T = e[3].Set(&v.t)
return
}
// SetExtendedCoordinates sets v = (X:Y:Z:T) in extended coordinates where
// x = X/Z, y = Y/Z, and xy = T/Z as in https://eprint.iacr.org/2008/522.
//
// If the coordinates are invalid or don't represent a valid point on the curve,
// SetExtendedCoordinates returns nil and an error and the receiver is
// unchanged. Otherwise, SetExtendedCoordinates returns v.
func (v *Point) SetExtendedCoordinates(X, Y, Z, T *field.Element) (*Point, error) {
if !isOnCurve(X, Y, Z, T) {
return nil, errors.New("edwards25519: invalid point coordinates")
}
v.x.Set(X)
v.y.Set(Y)
v.z.Set(Z)
v.t.Set(T)
return v, nil
}
func isOnCurve(X, Y, Z, T *field.Element) bool {
var lhs, rhs field.Element
XX := new(field.Element).Square(X)
YY := new(field.Element).Square(Y)
ZZ := new(field.Element).Square(Z)
TT := new(field.Element).Square(T)
// -x² + y² = 1 + dx²y²
// -(X/Z)² + (Y/Z)² = 1 + d(T/Z)²
// -X² + Y² = Z² + dT²
lhs.Subtract(YY, XX)
rhs.Multiply(d, TT).Add(&rhs, ZZ)
if lhs.Equal(&rhs) != 1 {
return false
}
// xy = T/Z
// XY/Z² = T/Z
// XY = TZ
lhs.Multiply(X, Y)
rhs.Multiply(T, Z)
return lhs.Equal(&rhs) == 1
}
// BytesMontgomery converts v to a point on the birationally-equivalent
// Curve25519 Montgomery curve, and returns its canonical 32 bytes encoding
// according to RFC 7748.
//
// Note that BytesMontgomery only encodes the u-coordinate, so v and -v encode
// to the same value. If v is the identity point, BytesMontgomery returns 32
// zero bytes, analogously to the X25519 function.
//
// The lack of an inverse operation (such as SetMontgomeryBytes) is deliberate:
// while every valid edwards25519 point has a unique u-coordinate Montgomery
// encoding, X25519 accepts inputs on the quadratic twist, which don't correspond
// to any edwards25519 point, and every other X25519 input corresponds to two
// edwards25519 points.
func (v *Point) BytesMontgomery() []byte {
// This function is outlined to make the allocations inline in the caller
// rather than happen on the heap.
var buf [32]byte
return v.bytesMontgomery(&buf)
}
func (v *Point) bytesMontgomery(buf *[32]byte) []byte {
checkInitialized(v)
// RFC 7748, Section 4.1 provides the bilinear map to calculate the
// Montgomery u-coordinate
//
// u = (1 + y) / (1 - y)
//
// where y = Y / Z.
var y, recip, u field.Element
y.Multiply(&v.y, y.Invert(&v.z)) // y = Y / Z
recip.Invert(recip.Subtract(feOne, &y)) // r = 1/(1 - y)
u.Multiply(u.Add(feOne, &y), &recip) // u = (1 + y)*r
return copyFieldElement(buf, &u)
}
// MultByCofactor sets v = 8 * p, and returns v.
func (v *Point) MultByCofactor(p *Point) *Point {
checkInitialized(p)
result := projP1xP1{}
pp := (&projP2{}).FromP3(p)
result.Double(pp)
pp.FromP1xP1(&result)
result.Double(pp)
pp.FromP1xP1(&result)
result.Double(pp)
return v.fromP1xP1(&result)
}
// Given k > 0, pow2k sets s = s**(2**k) by squaring s k times.
func (s *Scalar) pow2k(k int) {
for i := 0; i < k; i++ {
s.Multiply(s, s)
}
}
// Invert sets s to the inverse of a nonzero scalar v, and returns s.
//
// If t is zero, Invert returns zero.
func (s *Scalar) Invert(t *Scalar) *Scalar {
// Uses a hardcoded sliding window of width 4.
var table [8]Scalar
var tt Scalar
tt.Multiply(t, t)
table[0] = *t
for i := 0; i < 7; i++ {
table[i+1].Multiply(&table[i], &tt)
}
// Now table = [t**1, t**3, t**5, t**7, t**9, t**11, t**13, t**15]
// so t**k = table[k/2] for odd k
// To compute the sliding window digits, use the following Sage script:
// sage: import itertools
// sage: def sliding_window(w,k):
// ....: digits = []
// ....: while k > 0:
// ....: if k % 2 == 1:
// ....: kmod = k % (2**w)
// ....: digits.append(kmod)
// ....: k = k - kmod
// ....: else:
// ....: digits.append(0)
// ....: k = k // 2
// ....: return digits
// Now we can compute s roughly as follows:
// sage: s = 1
// sage: for coeff in reversed(sliding_window(4,l-2)):
// ....: s = s*s
// ....: if coeff > 0 :
// ....: s = s*t**coeff
// This works on one bit at a time, with many runs of zeros.
// The digits can be collapsed into [(count, coeff)] as follows:
// sage: [(len(list(group)),d) for d,group in itertools.groupby(sliding_window(4,l-2))]
// Entries of the form (k, 0) turn into pow2k(k)
// Entries of the form (1, coeff) turn into a squaring and then a table lookup.
// We can fold the squaring into the previous pow2k(k) as pow2k(k+1).
*s = table[1/2]
s.pow2k(127 + 1)
s.Multiply(s, &table[1/2])
s.pow2k(4 + 1)
s.Multiply(s, &table[9/2])
s.pow2k(3 + 1)
s.Multiply(s, &table[11/2])
s.pow2k(3 + 1)
s.Multiply(s, &table[13/2])
s.pow2k(3 + 1)
s.Multiply(s, &table[15/2])
s.pow2k(4 + 1)
s.Multiply(s, &table[7/2])
s.pow2k(4 + 1)
s.Multiply(s, &table[15/2])
s.pow2k(3 + 1)
s.Multiply(s, &table[5/2])
s.pow2k(3 + 1)
s.Multiply(s, &table[1/2])
s.pow2k(4 + 1)
s.Multiply(s, &table[15/2])
s.pow2k(4 + 1)
s.Multiply(s, &table[15/2])
s.pow2k(4 + 1)
s.Multiply(s, &table[7/2])
s.pow2k(3 + 1)
s.Multiply(s, &table[3/2])
s.pow2k(4 + 1)
s.Multiply(s, &table[11/2])
s.pow2k(5 + 1)
s.Multiply(s, &table[11/2])
s.pow2k(9 + 1)
s.Multiply(s, &table[9/2])
s.pow2k(3 + 1)
s.Multiply(s, &table[3/2])
s.pow2k(4 + 1)
s.Multiply(s, &table[3/2])
s.pow2k(4 + 1)
s.Multiply(s, &table[3/2])
s.pow2k(4 + 1)
s.Multiply(s, &table[9/2])
s.pow2k(3 + 1)
s.Multiply(s, &table[7/2])
s.pow2k(3 + 1)
s.Multiply(s, &table[3/2])
s.pow2k(3 + 1)
s.Multiply(s, &table[13/2])
s.pow2k(3 + 1)
s.Multiply(s, &table[7/2])
s.pow2k(4 + 1)
s.Multiply(s, &table[9/2])
s.pow2k(3 + 1)
s.Multiply(s, &table[15/2])
s.pow2k(4 + 1)
s.Multiply(s, &table[11/2])
return s
}
// MultiScalarMult sets v = sum(scalars[i] * points[i]), and returns v.
//
// Execution time depends only on the lengths of the two slices, which must match.
func (v *Point) MultiScalarMult(scalars []*Scalar, points []*Point) *Point {
if len(scalars) != len(points) {
panic("edwards25519: called MultiScalarMult with different size inputs")
}
checkInitialized(points...)
// Proceed as in the single-base case, but share doublings
// between each point in the multiscalar equation.
// Build lookup tables for each point
tables := make([]projLookupTable, len(points))
for i := range tables {
tables[i].FromP3(points[i])
}
// Compute signed radix-16 digits for each scalar
digits := make([][64]int8, len(scalars))
for i := range digits {
digits[i] = scalars[i].signedRadix16()
}
// Unwrap first loop iteration to save computing 16*identity
multiple := &projCached{}
tmp1 := &projP1xP1{}
tmp2 := &projP2{}
// Lookup-and-add the appropriate multiple of each input point
for j := range tables {
tables[j].SelectInto(multiple, digits[j][63])
tmp1.Add(v, multiple) // tmp1 = v + x_(j,63)*Q in P1xP1 coords
v.fromP1xP1(tmp1) // update v
}
tmp2.FromP3(v) // set up tmp2 = v in P2 coords for next iteration
for i := 62; i >= 0; i-- {
tmp1.Double(tmp2) // tmp1 = 2*(prev) in P1xP1 coords
tmp2.FromP1xP1(tmp1) // tmp2 = 2*(prev) in P2 coords
tmp1.Double(tmp2) // tmp1 = 4*(prev) in P1xP1 coords
tmp2.FromP1xP1(tmp1) // tmp2 = 4*(prev) in P2 coords
tmp1.Double(tmp2) // tmp1 = 8*(prev) in P1xP1 coords
tmp2.FromP1xP1(tmp1) // tmp2 = 8*(prev) in P2 coords
tmp1.Double(tmp2) // tmp1 = 16*(prev) in P1xP1 coords
v.fromP1xP1(tmp1) // v = 16*(prev) in P3 coords
// Lookup-and-add the appropriate multiple of each input point
for j := range tables {
tables[j].SelectInto(multiple, digits[j][i])
tmp1.Add(v, multiple) // tmp1 = v + x_(j,i)*Q in P1xP1 coords
v.fromP1xP1(tmp1) // update v
}
tmp2.FromP3(v) // set up tmp2 = v in P2 coords for next iteration
}
return v
}
// VarTimeMultiScalarMult sets v = sum(scalars[i] * points[i]), and returns v.
//
// Execution time depends on the inputs.
func (v *Point) VarTimeMultiScalarMult(scalars []*Scalar, points []*Point) *Point {
if len(scalars) != len(points) {
panic("edwards25519: called VarTimeMultiScalarMult with different size inputs")
}
checkInitialized(points...)
// Generalize double-base NAF computation to arbitrary sizes.
// Here all the points are dynamic, so we only use the smaller
// tables.
// Build lookup tables for each point
tables := make([]nafLookupTable5, len(points))
for i := range tables {
tables[i].FromP3(points[i])
}
// Compute a NAF for each scalar
nafs := make([][256]int8, len(scalars))
for i := range nafs {
nafs[i] = scalars[i].nonAdjacentForm(5)
}
multiple := &projCached{}
tmp1 := &projP1xP1{}
tmp2 := &projP2{}
tmp2.Zero()
// Move from high to low bits, doubling the accumulator
// at each iteration and checking whether there is a nonzero
// coefficient to look up a multiple of.
//
// Skip trying to find the first nonzero coefficient, because
// searching might be more work than a few extra doublings.
for i := 255; i >= 0; i-- {
tmp1.Double(tmp2)
for j := range nafs {
if nafs[j][i] > 0 {
v.fromP1xP1(tmp1)
tables[j].SelectInto(multiple, nafs[j][i])
tmp1.Add(v, multiple)
} else if nafs[j][i] < 0 {
v.fromP1xP1(tmp1)
tables[j].SelectInto(multiple, -nafs[j][i])
tmp1.Sub(v, multiple)
}
}
tmp2.FromP1xP1(tmp1)
}
v.fromP2(tmp2)
return v
}
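Two of the extensions above are easy to sanity-check from their documented contracts: MultByCofactor computes 8 * p, so it maps the identity to itself, and BytesMontgomery encodes the identity as 32 zero bytes. An illustrative sketch (editorial, not part of the vendored file):

```go
package main

import (
	"bytes"
	"fmt"

	"filippo.io/edwards25519"
)

func main() {
	id := edwards25519.NewIdentityPoint()

	// 8 * identity is still the identity.
	eight := new(edwards25519.Point).MultByCofactor(id)
	fmt.Println(eight.Equal(id) == 1) // true

	// The identity's Montgomery encoding is 32 zero bytes,
	// matching the X25519 convention noted in the docs above.
	fmt.Println(bytes.Equal(id.BytesMontgomery(), make([]byte, 32))) // true
}
```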

vendor/filippo.io/edwards25519/field/fe.go generated vendored Normal file

@@ -0,0 +1,420 @@
// Copyright (c) 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package field implements fast arithmetic modulo 2^255-19.
package field
import (
"crypto/subtle"
"encoding/binary"
"errors"
"math/bits"
)
// Element represents an element of the field GF(2^255-19). Note that this
// is not a cryptographically secure group, and should only be used to interact
// with edwards25519.Point coordinates.
//
// This type works similarly to math/big.Int, and all arguments and receivers
// are allowed to alias.
//
// The zero value is a valid zero element.
type Element struct {
// An element t represents the integer
// t.l0 + t.l1*2^51 + t.l2*2^102 + t.l3*2^153 + t.l4*2^204
//
// Between operations, all limbs are expected to be lower than 2^52.
l0 uint64
l1 uint64
l2 uint64
l3 uint64
l4 uint64
}
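// Editorial note (not upstream): as a concrete example of this limb layout,
// the integer 2^51 + 3 is held as Element{l0: 3, l1: 1}, since l0 carries
// bits 0..50 and l1 carries bits 51..101 of the represented value.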
const maskLow51Bits uint64 = (1 << 51) - 1
var feZero = &Element{0, 0, 0, 0, 0}
// Zero sets v = 0, and returns v.
func (v *Element) Zero() *Element {
*v = *feZero
return v
}
var feOne = &Element{1, 0, 0, 0, 0}
// One sets v = 1, and returns v.
func (v *Element) One() *Element {
*v = *feOne
return v
}
// reduce reduces v modulo 2^255 - 19 and returns it.
func (v *Element) reduce() *Element {
v.carryPropagate()
// After the light reduction we now have a field element representation
// v < 2^255 + 2^13 * 19, but need v < 2^255 - 19.
// If v >= 2^255 - 19, then v + 19 >= 2^255, which would overflow 2^255 - 1,
// generating a carry. That is, c will be 0 if v < 2^255 - 19, and 1 otherwise.
c := (v.l0 + 19) >> 51
c = (v.l1 + c) >> 51
c = (v.l2 + c) >> 51
c = (v.l3 + c) >> 51
c = (v.l4 + c) >> 51
// If v < 2^255 - 19 and c = 0, this will be a no-op. Otherwise, it's
// effectively applying the reduction identity to the carry.
v.l0 += 19 * c
v.l1 += v.l0 >> 51
v.l0 = v.l0 & maskLow51Bits
v.l2 += v.l1 >> 51
v.l1 = v.l1 & maskLow51Bits
v.l3 += v.l2 >> 51
v.l2 = v.l2 & maskLow51Bits
v.l4 += v.l3 >> 51
v.l3 = v.l3 & maskLow51Bits
// no additional carry
v.l4 = v.l4 & maskLow51Bits
return v
}
// Add sets v = a + b, and returns v.
func (v *Element) Add(a, b *Element) *Element {
v.l0 = a.l0 + b.l0
v.l1 = a.l1 + b.l1
v.l2 = a.l2 + b.l2
v.l3 = a.l3 + b.l3
v.l4 = a.l4 + b.l4
// Using the generic implementation here is actually faster than the
// assembly. Probably because the body of this function is so simple that
// the compiler can figure out better optimizations by inlining the carry
// propagation.
return v.carryPropagateGeneric()
}
// Subtract sets v = a - b, and returns v.
func (v *Element) Subtract(a, b *Element) *Element {
// We first add 2 * p, to guarantee the subtraction won't underflow, and
// then subtract b (which can be up to 2^255 + 2^13 * 19).
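// Editorial note (not upstream): limb-wise, 2*p is (2^52-38, 2^52-2, 2^52-2,
// 2^52-2, 2^52-2), i.e. the constants 0xFFFFFFFFFFFDA and 0xFFFFFFFFFFFFE below.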
v.l0 = (a.l0 + 0xFFFFFFFFFFFDA) - b.l0
v.l1 = (a.l1 + 0xFFFFFFFFFFFFE) - b.l1
v.l2 = (a.l2 + 0xFFFFFFFFFFFFE) - b.l2
v.l3 = (a.l3 + 0xFFFFFFFFFFFFE) - b.l3
v.l4 = (a.l4 + 0xFFFFFFFFFFFFE) - b.l4
return v.carryPropagate()
}
// Negate sets v = -a, and returns v.
func (v *Element) Negate(a *Element) *Element {
return v.Subtract(feZero, a)
}
// Invert sets v = 1/z mod p, and returns v.
//
// If z == 0, Invert returns v = 0.
func (v *Element) Invert(z *Element) *Element {
// Inversion is implemented as exponentiation with exponent p - 2. It uses the
// same sequence of 255 squarings and 11 multiplications as [Curve25519].
var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t Element
z2.Square(z) // 2
t.Square(&z2) // 4
t.Square(&t) // 8
z9.Multiply(&t, z) // 9
z11.Multiply(&z9, &z2) // 11
t.Square(&z11) // 22
z2_5_0.Multiply(&t, &z9) // 31 = 2^5 - 2^0
t.Square(&z2_5_0) // 2^6 - 2^1
for i := 0; i < 4; i++ {
t.Square(&t) // 2^10 - 2^5
}
z2_10_0.Multiply(&t, &z2_5_0) // 2^10 - 2^0
t.Square(&z2_10_0) // 2^11 - 2^1
for i := 0; i < 9; i++ {
t.Square(&t) // 2^20 - 2^10
}
z2_20_0.Multiply(&t, &z2_10_0) // 2^20 - 2^0
t.Square(&z2_20_0) // 2^21 - 2^1
for i := 0; i < 19; i++ {
t.Square(&t) // 2^40 - 2^20
}
t.Multiply(&t, &z2_20_0) // 2^40 - 2^0
t.Square(&t) // 2^41 - 2^1
for i := 0; i < 9; i++ {
t.Square(&t) // 2^50 - 2^10
}
z2_50_0.Multiply(&t, &z2_10_0) // 2^50 - 2^0
t.Square(&z2_50_0) // 2^51 - 2^1
for i := 0; i < 49; i++ {
t.Square(&t) // 2^100 - 2^50
}
z2_100_0.Multiply(&t, &z2_50_0) // 2^100 - 2^0
t.Square(&z2_100_0) // 2^101 - 2^1
for i := 0; i < 99; i++ {
t.Square(&t) // 2^200 - 2^100
}
t.Multiply(&t, &z2_100_0) // 2^200 - 2^0
t.Square(&t) // 2^201 - 2^1
for i := 0; i < 49; i++ {
t.Square(&t) // 2^250 - 2^50
}
t.Multiply(&t, &z2_50_0) // 2^250 - 2^0
t.Square(&t) // 2^251 - 2^1
t.Square(&t) // 2^252 - 2^2
t.Square(&t) // 2^253 - 2^3
t.Square(&t) // 2^254 - 2^4
t.Square(&t) // 2^255 - 2^5
return v.Multiply(&t, &z11) // 2^255 - 21
}
// Set sets v = a, and returns v.
func (v *Element) Set(a *Element) *Element {
*v = *a
return v
}
// SetBytes sets v to x, where x is a 32-byte little-endian encoding. If x is
// not of the right length, SetBytes returns nil and an error, and the
// receiver is unchanged.
//
// Consistent with RFC 7748, the most significant bit (the high bit of the
// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1)
// are accepted. Note that this is laxer than specified by RFC 8032, but
// consistent with most Ed25519 implementations.
func (v *Element) SetBytes(x []byte) (*Element, error) {
if len(x) != 32 {
return nil, errors.New("edwards25519: invalid field element input size")
}
// Bits 0:51 (bytes 0:8, bits 0:64, shift 0, mask 51).
v.l0 = binary.LittleEndian.Uint64(x[0:8])
v.l0 &= maskLow51Bits
// Bits 51:102 (bytes 6:14, bits 48:112, shift 3, mask 51).
v.l1 = binary.LittleEndian.Uint64(x[6:14]) >> 3
v.l1 &= maskLow51Bits
// Bits 102:153 (bytes 12:20, bits 96:160, shift 6, mask 51).
v.l2 = binary.LittleEndian.Uint64(x[12:20]) >> 6
v.l2 &= maskLow51Bits
// Bits 153:204 (bytes 19:27, bits 152:216, shift 1, mask 51).
v.l3 = binary.LittleEndian.Uint64(x[19:27]) >> 1
v.l3 &= maskLow51Bits
// Bits 204:255 (bytes 24:32, bits 192:256, shift 12, mask 51).
// Note: not bytes 25:33, shift 4, to avoid overread.
v.l4 = binary.LittleEndian.Uint64(x[24:32]) >> 12
v.l4 &= maskLow51Bits
return v, nil
}
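Because non-canonical values are accepted, the 32-byte encoding of p itself decodes successfully and reduces to zero. A small demonstration (an editorial sketch; it relies on the zero value of Element being the zero element):

```go
package main

import (
	"fmt"

	"filippo.io/edwards25519/field"
)

func main() {
	// Little-endian encoding of p = 2^255 - 19: 0xed, thirty 0xff bytes, 0x7f.
	pBytes := make([]byte, 32)
	pBytes[0] = 0xed
	for i := 1; i < 31; i++ {
		pBytes[i] = 0xff
	}
	pBytes[31] = 0x7f

	v, err := new(field.Element).SetBytes(pBytes)
	fmt.Println(err == nil)                       // true: non-canonical input is accepted
	fmt.Println(v.Equal(new(field.Element)) == 1) // true: p ≡ 0 (mod p)
}
```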
// Bytes returns the canonical 32-byte little-endian encoding of v.
func (v *Element) Bytes() []byte {
// This function is outlined to make the allocations inline in the caller
// rather than happen on the heap.
var out [32]byte
return v.bytes(&out)
}
func (v *Element) bytes(out *[32]byte) []byte {
t := *v
t.reduce()
var buf [8]byte
for i, l := range [5]uint64{t.l0, t.l1, t.l2, t.l3, t.l4} {
bitsOffset := i * 51
binary.LittleEndian.PutUint64(buf[:], l<<uint(bitsOffset%8))
for i, bb := range buf {
off := bitsOffset/8 + i
if off >= len(out) {
break
}
out[off] |= bb
}
}
return out[:]
}
// Equal returns 1 if v and u are equal, and 0 otherwise.
func (v *Element) Equal(u *Element) int {
sa, sv := u.Bytes(), v.Bytes()
return subtle.ConstantTimeCompare(sa, sv)
}
// mask64Bits returns 0xffffffffffffffff if cond is 1, and 0 otherwise.
func mask64Bits(cond int) uint64 { return ^(uint64(cond) - 1) }
// Select sets v to a if cond == 1, and to b if cond == 0.
func (v *Element) Select(a, b *Element, cond int) *Element {
m := mask64Bits(cond)
v.l0 = (m & a.l0) | (^m & b.l0)
v.l1 = (m & a.l1) | (^m & b.l1)
v.l2 = (m & a.l2) | (^m & b.l2)
v.l3 = (m & a.l3) | (^m & b.l3)
v.l4 = (m & a.l4) | (^m & b.l4)
return v
}
// Swap swaps v and u if cond == 1 or leaves them unchanged if cond == 0, and returns v.
func (v *Element) Swap(u *Element, cond int) {
m := mask64Bits(cond)
t := m & (v.l0 ^ u.l0)
v.l0 ^= t
u.l0 ^= t
t = m & (v.l1 ^ u.l1)
v.l1 ^= t
u.l1 ^= t
t = m & (v.l2 ^ u.l2)
v.l2 ^= t
u.l2 ^= t
t = m & (v.l3 ^ u.l3)
v.l3 ^= t
u.l3 ^= t
t = m & (v.l4 ^ u.l4)
v.l4 ^= t
u.l4 ^= t
}
// IsNegative returns 1 if v is negative, and 0 otherwise.
func (v *Element) IsNegative() int {
return int(v.Bytes()[0] & 1)
}
// Absolute sets v to |u|, and returns v.
func (v *Element) Absolute(u *Element) *Element {
return v.Select(new(Element).Negate(u), u, u.IsNegative())
}
// Multiply sets v = x * y, and returns v.
func (v *Element) Multiply(x, y *Element) *Element {
feMul(v, x, y)
return v
}
// Square sets v = x * x, and returns v.
func (v *Element) Square(x *Element) *Element {
feSquare(v, x)
return v
}
// Mult32 sets v = x * y, and returns v.
func (v *Element) Mult32(x *Element, y uint32) *Element {
x0lo, x0hi := mul51(x.l0, y)
x1lo, x1hi := mul51(x.l1, y)
x2lo, x2hi := mul51(x.l2, y)
x3lo, x3hi := mul51(x.l3, y)
x4lo, x4hi := mul51(x.l4, y)
v.l0 = x0lo + 19*x4hi // carried over per the reduction identity
v.l1 = x1lo + x0hi
v.l2 = x2lo + x1hi
v.l3 = x3lo + x2hi
v.l4 = x4lo + x3hi
// The hi portions are going to be only 32 bits, plus any previous excess,
// so we can skip the carry propagation.
return v
}
// mul51 returns lo + hi * 2⁵¹ = a * b.
func mul51(a uint64, b uint32) (lo uint64, hi uint64) {
mh, ml := bits.Mul64(a, uint64(b))
lo = ml & maskLow51Bits
hi = (mh << 13) | (ml >> 51)
return
}
// Pow22523 sets v = x^((p-5)/8), and returns v. (p-5)/8 is 2^252-3.
func (v *Element) Pow22523(x *Element) *Element {
var t0, t1, t2 Element
t0.Square(x) // x^2
t1.Square(&t0) // x^4
t1.Square(&t1) // x^8
t1.Multiply(x, &t1) // x^9
t0.Multiply(&t0, &t1) // x^11
t0.Square(&t0) // x^22
t0.Multiply(&t1, &t0) // x^31
t1.Square(&t0) // x^62
for i := 1; i < 5; i++ { // x^992
t1.Square(&t1)
}
t0.Multiply(&t1, &t0) // x^1023 -> 1023 = 2^10 - 1
t1.Square(&t0) // 2^11 - 2
for i := 1; i < 10; i++ { // 2^20 - 2^10
t1.Square(&t1)
}
t1.Multiply(&t1, &t0) // 2^20 - 1
t2.Square(&t1) // 2^21 - 2
for i := 1; i < 20; i++ { // 2^40 - 2^20
t2.Square(&t2)
}
t1.Multiply(&t2, &t1) // 2^40 - 1
t1.Square(&t1) // 2^41 - 2
for i := 1; i < 10; i++ { // 2^50 - 2^10
t1.Square(&t1)
}
t0.Multiply(&t1, &t0) // 2^50 - 1
t1.Square(&t0) // 2^51 - 2
for i := 1; i < 50; i++ { // 2^100 - 2^50
t1.Square(&t1)
}
t1.Multiply(&t1, &t0) // 2^100 - 1
t2.Square(&t1) // 2^101 - 2
for i := 1; i < 100; i++ { // 2^200 - 2^100
t2.Square(&t2)
}
t1.Multiply(&t2, &t1) // 2^200 - 1
t1.Square(&t1) // 2^201 - 2
for i := 1; i < 50; i++ { // 2^250 - 2^50
t1.Square(&t1)
}
t0.Multiply(&t1, &t0) // 2^250 - 1
t0.Square(&t0) // 2^251 - 2
t0.Square(&t0) // 2^252 - 4
return v.Multiply(&t0, x) // 2^252 - 3 -> x^(2^252-3)
}
// sqrtM1 is 2^((p-1)/4), which squared is equal to -1 by Euler's Criterion.
var sqrtM1 = &Element{1718705420411056, 234908883556509,
2233514472574048, 2117202627021982, 765476049583133}
// SqrtRatio sets r to the non-negative square root of the ratio of u and v.
//
// If u/v is square, SqrtRatio returns r and 1. If u/v is not square, SqrtRatio
// sets r according to Section 4.3 of draft-irtf-cfrg-ristretto255-decaf448-00,
// and returns r and 0.
func (r *Element) SqrtRatio(u, v *Element) (R *Element, wasSquare int) {
t0 := new(Element)
// r = (u * v3) * (u * v7)^((p-5)/8)
v2 := new(Element).Square(v)
uv3 := new(Element).Multiply(u, t0.Multiply(v2, v))
uv7 := new(Element).Multiply(uv3, t0.Square(v2))
rr := new(Element).Multiply(uv3, t0.Pow22523(uv7))
check := new(Element).Multiply(v, t0.Square(rr)) // check = v * r^2
uNeg := new(Element).Negate(u)
correctSignSqrt := check.Equal(u)
flippedSignSqrt := check.Equal(uNeg)
flippedSignSqrtI := check.Equal(t0.Multiply(uNeg, sqrtM1))
rPrime := new(Element).Multiply(rr, sqrtM1) // r_prime = SQRT_M1 * r
// r = CT_SELECT(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r)
rr.Select(rPrime, rr, flippedSignSqrt|flippedSignSqrtI)
r.Absolute(rr) // Choose the nonnegative square root.
return r, correctSignSqrt | flippedSignSqrt
}
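For example, with u = 4 and v = 1 the ratio is a square and the non-negative root is 2 (its canonical encoding has an even low bit). A usage sketch:

```go
package main

import (
	"fmt"

	"filippo.io/edwards25519/field"
)

// le32 builds the 32-byte little-endian encoding of a small value.
func le32(x byte) []byte {
	b := make([]byte, 32)
	b[0] = x
	return b
}

func main() {
	u, _ := new(field.Element).SetBytes(le32(4))
	v, _ := new(field.Element).SetBytes(le32(1))

	r, wasSquare := new(field.Element).SqrtRatio(u, v)
	two, _ := new(field.Element).SetBytes(le32(2))
	fmt.Println(wasSquare == 1, r.Equal(two) == 1) // true true
}
```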

vendor/filippo.io/edwards25519/field/fe_amd64.go generated vendored Normal file

@ -0,0 +1,16 @@
// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.
//go:build amd64 && gc && !purego
// +build amd64,gc,!purego
package field
// feMul sets out = a * b. It works like feMulGeneric.
//
//go:noescape
func feMul(out *Element, a *Element, b *Element)
// feSquare sets out = a * a. It works like feSquareGeneric.
//
//go:noescape
func feSquare(out *Element, a *Element)

vendor/filippo.io/edwards25519/field/fe_amd64.s generated vendored Normal file

@ -0,0 +1,379 @@
// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.
//go:build amd64 && gc && !purego
// +build amd64,gc,!purego
#include "textflag.h"
// func feMul(out *Element, a *Element, b *Element)
TEXT ·feMul(SB), NOSPLIT, $0-24
MOVQ a+8(FP), CX
MOVQ b+16(FP), BX
// r0 = a0×b0
MOVQ (CX), AX
MULQ (BX)
MOVQ AX, DI
MOVQ DX, SI
// r0 += 19×a1×b4
MOVQ 8(CX), AX
IMUL3Q $0x13, AX, AX
MULQ 32(BX)
ADDQ AX, DI
ADCQ DX, SI
// r0 += 19×a2×b3
MOVQ 16(CX), AX
IMUL3Q $0x13, AX, AX
MULQ 24(BX)
ADDQ AX, DI
ADCQ DX, SI
// r0 += 19×a3×b2
MOVQ 24(CX), AX
IMUL3Q $0x13, AX, AX
MULQ 16(BX)
ADDQ AX, DI
ADCQ DX, SI
// r0 += 19×a4×b1
MOVQ 32(CX), AX
IMUL3Q $0x13, AX, AX
MULQ 8(BX)
ADDQ AX, DI
ADCQ DX, SI
// r1 = a0×b1
MOVQ (CX), AX
MULQ 8(BX)
MOVQ AX, R9
MOVQ DX, R8
// r1 += a1×b0
MOVQ 8(CX), AX
MULQ (BX)
ADDQ AX, R9
ADCQ DX, R8
// r1 += 19×a2×b4
MOVQ 16(CX), AX
IMUL3Q $0x13, AX, AX
MULQ 32(BX)
ADDQ AX, R9
ADCQ DX, R8
// r1 += 19×a3×b3
MOVQ 24(CX), AX
IMUL3Q $0x13, AX, AX
MULQ 24(BX)
ADDQ AX, R9
ADCQ DX, R8
// r1 += 19×a4×b2
MOVQ 32(CX), AX
IMUL3Q $0x13, AX, AX
MULQ 16(BX)
ADDQ AX, R9
ADCQ DX, R8
// r2 = a0×b2
MOVQ (CX), AX
MULQ 16(BX)
MOVQ AX, R11
MOVQ DX, R10
// r2 += a1×b1
MOVQ 8(CX), AX
MULQ 8(BX)
ADDQ AX, R11
ADCQ DX, R10
// r2 += a2×b0
MOVQ 16(CX), AX
MULQ (BX)
ADDQ AX, R11
ADCQ DX, R10
// r2 += 19×a3×b4
MOVQ 24(CX), AX
IMUL3Q $0x13, AX, AX
MULQ 32(BX)
ADDQ AX, R11
ADCQ DX, R10
// r2 += 19×a4×b3
MOVQ 32(CX), AX
IMUL3Q $0x13, AX, AX
MULQ 24(BX)
ADDQ AX, R11
ADCQ DX, R10
// r3 = a0×b3
MOVQ (CX), AX
MULQ 24(BX)
MOVQ AX, R13
MOVQ DX, R12
// r3 += a1×b2
MOVQ 8(CX), AX
MULQ 16(BX)
ADDQ AX, R13
ADCQ DX, R12
// r3 += a2×b1
MOVQ 16(CX), AX
MULQ 8(BX)
ADDQ AX, R13
ADCQ DX, R12
// r3 += a3×b0
MOVQ 24(CX), AX
MULQ (BX)
ADDQ AX, R13
ADCQ DX, R12
// r3 += 19×a4×b4
MOVQ 32(CX), AX
IMUL3Q $0x13, AX, AX
MULQ 32(BX)
ADDQ AX, R13
ADCQ DX, R12
// r4 = a0×b4
MOVQ (CX), AX
MULQ 32(BX)
MOVQ AX, R15
MOVQ DX, R14
// r4 += a1×b3
MOVQ 8(CX), AX
MULQ 24(BX)
ADDQ AX, R15
ADCQ DX, R14
// r4 += a2×b2
MOVQ 16(CX), AX
MULQ 16(BX)
ADDQ AX, R15
ADCQ DX, R14
// r4 += a3×b1
MOVQ 24(CX), AX
MULQ 8(BX)
ADDQ AX, R15
ADCQ DX, R14
// r4 += a4×b0
MOVQ 32(CX), AX
MULQ (BX)
ADDQ AX, R15
ADCQ DX, R14
// First reduction chain
MOVQ $0x0007ffffffffffff, AX
SHLQ $0x0d, DI, SI
SHLQ $0x0d, R9, R8
SHLQ $0x0d, R11, R10
SHLQ $0x0d, R13, R12
SHLQ $0x0d, R15, R14
ANDQ AX, DI
IMUL3Q $0x13, R14, R14
ADDQ R14, DI
ANDQ AX, R9
ADDQ SI, R9
ANDQ AX, R11
ADDQ R8, R11
ANDQ AX, R13
ADDQ R10, R13
ANDQ AX, R15
ADDQ R12, R15
// Second reduction chain (carryPropagate)
MOVQ DI, SI
SHRQ $0x33, SI
MOVQ R9, R8
SHRQ $0x33, R8
MOVQ R11, R10
SHRQ $0x33, R10
MOVQ R13, R12
SHRQ $0x33, R12
MOVQ R15, R14
SHRQ $0x33, R14
ANDQ AX, DI
IMUL3Q $0x13, R14, R14
ADDQ R14, DI
ANDQ AX, R9
ADDQ SI, R9
ANDQ AX, R11
ADDQ R8, R11
ANDQ AX, R13
ADDQ R10, R13
ANDQ AX, R15
ADDQ R12, R15
// Store output
MOVQ out+0(FP), AX
MOVQ DI, (AX)
MOVQ R9, 8(AX)
MOVQ R11, 16(AX)
MOVQ R13, 24(AX)
MOVQ R15, 32(AX)
RET
// func feSquare(out *Element, a *Element)
TEXT ·feSquare(SB), NOSPLIT, $0-16
MOVQ a+8(FP), CX
// r0 = l0×l0
MOVQ (CX), AX
MULQ (CX)
MOVQ AX, SI
MOVQ DX, BX
// r0 += 38×l1×l4
MOVQ 8(CX), AX
IMUL3Q $0x26, AX, AX
MULQ 32(CX)
ADDQ AX, SI
ADCQ DX, BX
// r0 += 38×l2×l3
MOVQ 16(CX), AX
IMUL3Q $0x26, AX, AX
MULQ 24(CX)
ADDQ AX, SI
ADCQ DX, BX
// r1 = 2×l0×l1
MOVQ (CX), AX
SHLQ $0x01, AX
MULQ 8(CX)
MOVQ AX, R8
MOVQ DX, DI
// r1 += 38×l2×l4
MOVQ 16(CX), AX
IMUL3Q $0x26, AX, AX
MULQ 32(CX)
ADDQ AX, R8
ADCQ DX, DI
// r1 += 19×l3×l3
MOVQ 24(CX), AX
IMUL3Q $0x13, AX, AX
MULQ 24(CX)
ADDQ AX, R8
ADCQ DX, DI
// r2 = 2×l0×l2
MOVQ (CX), AX
SHLQ $0x01, AX
MULQ 16(CX)
MOVQ AX, R10
MOVQ DX, R9
// r2 += l1×l1
MOVQ 8(CX), AX
MULQ 8(CX)
ADDQ AX, R10
ADCQ DX, R9
// r2 += 38×l3×l4
MOVQ 24(CX), AX
IMUL3Q $0x26, AX, AX
MULQ 32(CX)
ADDQ AX, R10
ADCQ DX, R9
// r3 = 2×l0×l3
MOVQ (CX), AX
SHLQ $0x01, AX
MULQ 24(CX)
MOVQ AX, R12
MOVQ DX, R11
// r3 += 2×l1×l2
MOVQ 8(CX), AX
IMUL3Q $0x02, AX, AX
MULQ 16(CX)
ADDQ AX, R12
ADCQ DX, R11
// r3 += 19×l4×l4
MOVQ 32(CX), AX
IMUL3Q $0x13, AX, AX
MULQ 32(CX)
ADDQ AX, R12
ADCQ DX, R11
// r4 = 2×l0×l4
MOVQ (CX), AX
SHLQ $0x01, AX
MULQ 32(CX)
MOVQ AX, R14
MOVQ DX, R13
// r4 += 2×l1×l3
MOVQ 8(CX), AX
IMUL3Q $0x02, AX, AX
MULQ 24(CX)
ADDQ AX, R14
ADCQ DX, R13
// r4 += l2×l2
MOVQ 16(CX), AX
MULQ 16(CX)
ADDQ AX, R14
ADCQ DX, R13
// First reduction chain
MOVQ $0x0007ffffffffffff, AX
SHLQ $0x0d, SI, BX
SHLQ $0x0d, R8, DI
SHLQ $0x0d, R10, R9
SHLQ $0x0d, R12, R11
SHLQ $0x0d, R14, R13
ANDQ AX, SI
IMUL3Q $0x13, R13, R13
ADDQ R13, SI
ANDQ AX, R8
ADDQ BX, R8
ANDQ AX, R10
ADDQ DI, R10
ANDQ AX, R12
ADDQ R9, R12
ANDQ AX, R14
ADDQ R11, R14
// Second reduction chain (carryPropagate)
MOVQ SI, BX
SHRQ $0x33, BX
MOVQ R8, DI
SHRQ $0x33, DI
MOVQ R10, R9
SHRQ $0x33, R9
MOVQ R12, R11
SHRQ $0x33, R11
MOVQ R14, R13
SHRQ $0x33, R13
ANDQ AX, SI
IMUL3Q $0x13, R13, R13
ADDQ R13, SI
ANDQ AX, R8
ADDQ BX, R8
ANDQ AX, R10
ADDQ DI, R10
ANDQ AX, R12
ADDQ R9, R12
ANDQ AX, R14
ADDQ R11, R14
// Store output
MOVQ out+0(FP), AX
MOVQ SI, (AX)
MOVQ R8, 8(AX)
MOVQ R10, 16(AX)
MOVQ R12, 24(AX)
MOVQ R14, 32(AX)
RET

vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go generated vendored Normal file

@ -0,0 +1,12 @@
// Copyright (c) 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !amd64 || !gc || purego
// +build !amd64 !gc purego
package field
func feMul(v, x, y *Element) { feMulGeneric(v, x, y) }
func feSquare(v, x *Element) { feSquareGeneric(v, x) }

vendor/filippo.io/edwards25519/field/fe_arm64.go generated vendored Normal file

@ -0,0 +1,16 @@
// Copyright (c) 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build arm64 && gc && !purego
// +build arm64,gc,!purego
package field
//go:noescape
func carryPropagate(v *Element)
func (v *Element) carryPropagate() *Element {
carryPropagate(v)
return v
}

vendor/filippo.io/edwards25519/field/fe_arm64.s generated vendored Normal file

@ -0,0 +1,42 @@
// Copyright (c) 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build arm64 && gc && !purego
#include "textflag.h"
// carryPropagate works exactly like carryPropagateGeneric and uses the
// same AND, ADD, and LSR+MADD instructions emitted by the compiler, but
// avoids loading R0-R4 twice and uses LDP and STP.
//
// See https://golang.org/issues/43145 for the main compiler issue.
//
// func carryPropagate(v *Element)
TEXT ·carryPropagate(SB),NOFRAME|NOSPLIT,$0-8
MOVD v+0(FP), R20
LDP 0(R20), (R0, R1)
LDP 16(R20), (R2, R3)
MOVD 32(R20), R4
AND $0x7ffffffffffff, R0, R10
AND $0x7ffffffffffff, R1, R11
AND $0x7ffffffffffff, R2, R12
AND $0x7ffffffffffff, R3, R13
AND $0x7ffffffffffff, R4, R14
ADD R0>>51, R11, R11
ADD R1>>51, R12, R12
ADD R2>>51, R13, R13
ADD R3>>51, R14, R14
// R4>>51 * 19 + R10 -> R10
LSR $51, R4, R21
MOVD $19, R22
MADD R22, R10, R21, R10
STP (R10, R11), 0(R20)
STP (R12, R13), 16(R20)
MOVD R14, 32(R20)
RET

vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go generated vendored Normal file

@ -0,0 +1,12 @@
// Copyright (c) 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !arm64 || !gc || purego
// +build !arm64 !gc purego
package field
func (v *Element) carryPropagate() *Element {
return v.carryPropagateGeneric()
}

vendor/filippo.io/edwards25519/field/fe_extra.go generated vendored Normal file

@ -0,0 +1,50 @@
// Copyright (c) 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package field
import "errors"
// This file contains additional functionality that is not included in the
// upstream crypto/ed25519/edwards25519/field package.
// SetWideBytes sets v to x, where x is a 64-byte little-endian encoding, which
// is reduced modulo the field order. If x is not of the right length,
// SetWideBytes returns nil and an error, and the receiver is unchanged.
//
// SetWideBytes is not necessary to select a uniformly distributed value, and is
// only provided for compatibility: SetBytes can be used instead as the chance
// of bias is less than 2⁻²⁵⁰.
func (v *Element) SetWideBytes(x []byte) (*Element, error) {
if len(x) != 64 {
return nil, errors.New("edwards25519: invalid SetWideBytes input size")
}
// Split the 64 bytes into two elements, and extract the most significant
// bit of each, which is ignored by SetBytes.
lo, _ := new(Element).SetBytes(x[:32])
loMSB := uint64(x[31] >> 7)
hi, _ := new(Element).SetBytes(x[32:])
hiMSB := uint64(x[63] >> 7)
// The output we want is
//
// v = lo + loMSB * 2²⁵⁵ + hi * 2²⁵⁶ + hiMSB * 2⁵¹¹
//
// which applying the reduction identity comes out to
//
// v = lo + loMSB * 19 + hi * 2 * 19 + hiMSB * 2 * 19²
//
// l0 will be the sum of a 52 bits value (lo.l0), plus a 5 bits value
	// (loMSB * 19), a 57 bits value (hi.l0 * 2 * 19), and a 10 bits value
// (hiMSB * 2 * 19²), so it fits in a uint64.
v.l0 = lo.l0 + loMSB*19 + hi.l0*2*19 + hiMSB*2*19*19
v.l1 = lo.l1 + hi.l1*2*19
v.l2 = lo.l2 + hi.l2*2*19
v.l3 = lo.l3 + hi.l3*2*19
v.l4 = lo.l4 + hi.l4*2*19
return v.carryPropagate(), nil
}
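A way to convince yourself of the limb arithmetic is to compare SetWideBytes against a math/big reference reduction (an editorial sketch, not part of the vendored file):

```go
package main

import (
	"bytes"
	"crypto/rand"
	"fmt"
	"math/big"

	"filippo.io/edwards25519/field"
)

func reverse(b []byte) []byte {
	r := make([]byte, len(b))
	for i := range b {
		r[i] = b[len(b)-1-i]
	}
	return r
}

func main() {
	x := make([]byte, 64)
	rand.Read(x)

	v, _ := new(field.Element).SetWideBytes(x)

	// Reference: interpret x as a little-endian integer, reduce mod
	// p = 2^255 - 19, and re-encode as 32 little-endian bytes.
	p := new(big.Int).Lsh(big.NewInt(1), 255)
	p.Sub(p, big.NewInt(19))
	n := new(big.Int).SetBytes(reverse(x))
	n.Mod(n, p)
	want := make([]byte, 32)
	n.FillBytes(want)

	fmt.Println(bytes.Equal(v.Bytes(), reverse(want))) // true
}
```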

vendor/filippo.io/edwards25519/field/fe_generic.go generated vendored Normal file

@ -0,0 +1,266 @@
// Copyright (c) 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package field
import "math/bits"
// uint128 holds a 128-bit number as two 64-bit limbs, for use with the
// bits.Mul64 and bits.Add64 intrinsics.
type uint128 struct {
lo, hi uint64
}
// mul64 returns a * b.
func mul64(a, b uint64) uint128 {
hi, lo := bits.Mul64(a, b)
return uint128{lo, hi}
}
// addMul64 returns v + a * b.
func addMul64(v uint128, a, b uint64) uint128 {
hi, lo := bits.Mul64(a, b)
lo, c := bits.Add64(lo, v.lo, 0)
hi, _ = bits.Add64(hi, v.hi, c)
return uint128{lo, hi}
}
// shiftRightBy51 returns a >> 51. a is assumed to be at most 115 bits.
func shiftRightBy51(a uint128) uint64 {
return (a.hi << (64 - 51)) | (a.lo >> 51)
}
func feMulGeneric(v, a, b *Element) {
a0 := a.l0
a1 := a.l1
a2 := a.l2
a3 := a.l3
a4 := a.l4
b0 := b.l0
b1 := b.l1
b2 := b.l2
b3 := b.l3
b4 := b.l4
// Limb multiplication works like pen-and-paper columnar multiplication, but
// with 51-bit limbs instead of digits.
//
// a4 a3 a2 a1 a0 x
// b4 b3 b2 b1 b0 =
// ------------------------
// a4b0 a3b0 a2b0 a1b0 a0b0 +
// a4b1 a3b1 a2b1 a1b1 a0b1 +
// a4b2 a3b2 a2b2 a1b2 a0b2 +
// a4b3 a3b3 a2b3 a1b3 a0b3 +
// a4b4 a3b4 a2b4 a1b4 a0b4 =
// ----------------------------------------------
// r8 r7 r6 r5 r4 r3 r2 r1 r0
//
// We can then use the reduction identity (a * 2²⁵⁵ + b = a * 19 + b) to
// reduce the limbs that would overflow 255 bits. r5 * 2²⁵⁵ becomes 19 * r5,
// r6 * 2³⁰⁶ becomes 19 * r6 * 2⁵¹, etc.
//
// Reduction can be carried out simultaneously to multiplication. For
// example, we do not compute r5: whenever the result of a multiplication
// belongs to r5, like a1b4, we multiply it by 19 and add the result to r0.
//
// a4b0 a3b0 a2b0 a1b0 a0b0 +
// a3b1 a2b1 a1b1 a0b1 19×a4b1 +
// a2b2 a1b2 a0b2 19×a4b2 19×a3b2 +
// a1b3 a0b3 19×a4b3 19×a3b3 19×a2b3 +
// a0b4 19×a4b4 19×a3b4 19×a2b4 19×a1b4 =
// --------------------------------------
// r4 r3 r2 r1 r0
//
// Finally we add up the columns into wide, overlapping limbs.
a1_19 := a1 * 19
a2_19 := a2 * 19
a3_19 := a3 * 19
a4_19 := a4 * 19
// r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
r0 := mul64(a0, b0)
r0 = addMul64(r0, a1_19, b4)
r0 = addMul64(r0, a2_19, b3)
r0 = addMul64(r0, a3_19, b2)
r0 = addMul64(r0, a4_19, b1)
// r1 = a0×b1 + a1×b0 + 19×(a2×b4 + a3×b3 + a4×b2)
r1 := mul64(a0, b1)
r1 = addMul64(r1, a1, b0)
r1 = addMul64(r1, a2_19, b4)
r1 = addMul64(r1, a3_19, b3)
r1 = addMul64(r1, a4_19, b2)
// r2 = a0×b2 + a1×b1 + a2×b0 + 19×(a3×b4 + a4×b3)
r2 := mul64(a0, b2)
r2 = addMul64(r2, a1, b1)
r2 = addMul64(r2, a2, b0)
r2 = addMul64(r2, a3_19, b4)
r2 = addMul64(r2, a4_19, b3)
// r3 = a0×b3 + a1×b2 + a2×b1 + a3×b0 + 19×a4×b4
r3 := mul64(a0, b3)
r3 = addMul64(r3, a1, b2)
r3 = addMul64(r3, a2, b1)
r3 = addMul64(r3, a3, b0)
r3 = addMul64(r3, a4_19, b4)
// r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
r4 := mul64(a0, b4)
r4 = addMul64(r4, a1, b3)
r4 = addMul64(r4, a2, b2)
r4 = addMul64(r4, a3, b1)
r4 = addMul64(r4, a4, b0)
// After the multiplication, we need to reduce (carry) the five coefficients
// to obtain a result with limbs that are at most slightly larger than 2⁵¹,
// to respect the Element invariant.
//
// Overall, the reduction works the same as carryPropagate, except with
// wider inputs: we take the carry for each coefficient by shifting it right
// by 51, and add it to the limb above it. The top carry is multiplied by 19
// according to the reduction identity and added to the lowest limb.
//
// The largest coefficient (r0) will be at most 111 bits, which guarantees
// that all carries are at most 111 - 51 = 60 bits, which fits in a uint64.
//
// r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
// r0 < 2⁵²×2⁵² + 19×(2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵²)
// r0 < (1 + 19 × 4) × 2⁵² × 2⁵²
// r0 < 2⁷ × 2⁵² × 2⁵²
// r0 < 2¹¹¹
//
// Moreover, the top coefficient (r4) is at most 107 bits, so c4 is at most
// 56 bits, and c4 * 19 is at most 61 bits, which again fits in a uint64 and
// allows us to easily apply the reduction identity.
//
// r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
// r4 < 5 × 2⁵² × 2⁵²
// r4 < 2¹⁰⁷
//
c0 := shiftRightBy51(r0)
c1 := shiftRightBy51(r1)
c2 := shiftRightBy51(r2)
c3 := shiftRightBy51(r3)
c4 := shiftRightBy51(r4)
rr0 := r0.lo&maskLow51Bits + c4*19
rr1 := r1.lo&maskLow51Bits + c0
rr2 := r2.lo&maskLow51Bits + c1
rr3 := r3.lo&maskLow51Bits + c2
rr4 := r4.lo&maskLow51Bits + c3
// Now all coefficients fit into 64-bit registers but are still too large to
// be passed around as an Element. We therefore do one last carry chain,
// where the carries will be small enough to fit in the wiggle room above 2⁵¹.
*v = Element{rr0, rr1, rr2, rr3, rr4}
v.carryPropagate()
}
func feSquareGeneric(v, a *Element) {
l0 := a.l0
l1 := a.l1
l2 := a.l2
l3 := a.l3
l4 := a.l4
// Squaring works precisely like multiplication above, but thanks to its
// symmetry we get to group a few terms together.
//
// l4 l3 l2 l1 l0 x
// l4 l3 l2 l1 l0 =
// ------------------------
// l4l0 l3l0 l2l0 l1l0 l0l0 +
// l4l1 l3l1 l2l1 l1l1 l0l1 +
// l4l2 l3l2 l2l2 l1l2 l0l2 +
// l4l3 l3l3 l2l3 l1l3 l0l3 +
// l4l4 l3l4 l2l4 l1l4 l0l4 =
// ----------------------------------------------
// r8 r7 r6 r5 r4 r3 r2 r1 r0
//
// l4l0 l3l0 l2l0 l1l0 l0l0 +
// l3l1 l2l1 l1l1 l0l1 19×l4l1 +
// l2l2 l1l2 l0l2 19×l4l2 19×l3l2 +
// l1l3 l0l3 19×l4l3 19×l3l3 19×l2l3 +
// l0l4 19×l4l4 19×l3l4 19×l2l4 19×l1l4 =
// --------------------------------------
// r4 r3 r2 r1 r0
//
// With precomputed 2×, 19×, and 2×19× terms, we can compute each limb with
// only three Mul64 and four Add64, instead of five and eight.
l0_2 := l0 * 2
l1_2 := l1 * 2
l1_38 := l1 * 38
l2_38 := l2 * 38
l3_38 := l3 * 38
l3_19 := l3 * 19
l4_19 := l4 * 19
// r0 = l0×l0 + 19×(l1×l4 + l2×l3 + l3×l2 + l4×l1) = l0×l0 + 19×2×(l1×l4 + l2×l3)
r0 := mul64(l0, l0)
r0 = addMul64(r0, l1_38, l4)
r0 = addMul64(r0, l2_38, l3)
// r1 = l0×l1 + l1×l0 + 19×(l2×l4 + l3×l3 + l4×l2) = 2×l0×l1 + 19×2×l2×l4 + 19×l3×l3
r1 := mul64(l0_2, l1)
r1 = addMul64(r1, l2_38, l4)
r1 = addMul64(r1, l3_19, l3)
// r2 = l0×l2 + l1×l1 + l2×l0 + 19×(l3×l4 + l4×l3) = 2×l0×l2 + l1×l1 + 19×2×l3×l4
r2 := mul64(l0_2, l2)
r2 = addMul64(r2, l1, l1)
r2 = addMul64(r2, l3_38, l4)
// r3 = l0×l3 + l1×l2 + l2×l1 + l3×l0 + 19×l4×l4 = 2×l0×l3 + 2×l1×l2 + 19×l4×l4
r3 := mul64(l0_2, l3)
r3 = addMul64(r3, l1_2, l2)
r3 = addMul64(r3, l4_19, l4)
// r4 = l0×l4 + l1×l3 + l2×l2 + l3×l1 + l4×l0 = 2×l0×l4 + 2×l1×l3 + l2×l2
r4 := mul64(l0_2, l4)
r4 = addMul64(r4, l1_2, l3)
r4 = addMul64(r4, l2, l2)
c0 := shiftRightBy51(r0)
c1 := shiftRightBy51(r1)
c2 := shiftRightBy51(r2)
c3 := shiftRightBy51(r3)
c4 := shiftRightBy51(r4)
rr0 := r0.lo&maskLow51Bits + c4*19
rr1 := r1.lo&maskLow51Bits + c0
rr2 := r2.lo&maskLow51Bits + c1
rr3 := r3.lo&maskLow51Bits + c2
rr4 := r4.lo&maskLow51Bits + c3
*v = Element{rr0, rr1, rr2, rr3, rr4}
v.carryPropagate()
}
// carryPropagateGeneric brings the limbs below 52 bits by applying the reduction
// identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry.
func (v *Element) carryPropagateGeneric() *Element {
c0 := v.l0 >> 51
c1 := v.l1 >> 51
c2 := v.l2 >> 51
c3 := v.l3 >> 51
c4 := v.l4 >> 51
// c4 is at most 64 - 51 = 13 bits, so c4*19 is at most 18 bits, and
// the final l0 will be at most 52 bits. Similarly for the rest.
v.l0 = v.l0&maskLow51Bits + c4*19
v.l1 = v.l1&maskLow51Bits + c0
v.l2 = v.l2&maskLow51Bits + c1
v.l3 = v.l3&maskLow51Bits + c2
v.l4 = v.l4&maskLow51Bits + c3
return v
}
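The identity itself is easy to check numerically: since 2²⁵⁵ ≡ 19 (mod p), the high carry can be folded back into the low limb. A one-off math/big verification (editorial sketch):

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// a*2^255 + b ≡ 19*a + b (mod p), because 2^255 ≡ 19 (mod p).
	p := new(big.Int).Lsh(big.NewInt(1), 255)
	p.Sub(p, big.NewInt(19))

	a, b := big.NewInt(123456789), big.NewInt(987654321)

	lhs := new(big.Int).Lsh(a, 255)
	lhs.Add(lhs, b)
	lhs.Mod(lhs, p)

	rhs := new(big.Int).Mul(a, big.NewInt(19))
	rhs.Add(rhs, b)
	rhs.Mod(rhs, p)

	fmt.Println(lhs.Cmp(rhs) == 0) // true
}
```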

vendor/filippo.io/edwards25519/scalar.go generated vendored Normal file

@ -0,0 +1,343 @@
// Copyright (c) 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package edwards25519
import (
"encoding/binary"
"errors"
)
// A Scalar is an integer modulo
//
// l = 2^252 + 27742317777372353535851937790883648493
//
// which is the prime order of the edwards25519 group.
//
// This type works similarly to math/big.Int, and all arguments and
// receivers are allowed to alias.
//
// The zero value is a valid zero element.
type Scalar struct {
// s is the scalar in the Montgomery domain, in the format of the
// fiat-crypto implementation.
s fiatScalarMontgomeryDomainFieldElement
}
// The field implementation in scalar_fiat.go is generated by the fiat-crypto
// project (https://github.com/mit-plv/fiat-crypto) at version v0.0.9 (23d2dbc)
// from a formally verified model.
//
// fiat-crypto code comes under the following license.
//
// Copyright (c) 2015-2020 The fiat-crypto Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// THIS SOFTWARE IS PROVIDED BY the fiat-crypto authors "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Berkeley Software Design,
// Inc. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// NewScalar returns a new zero Scalar.
func NewScalar() *Scalar {
return &Scalar{}
}
// MultiplyAdd sets s = x * y + z mod l, and returns s. It is equivalent to
// using Multiply and then Add.
func (s *Scalar) MultiplyAdd(x, y, z *Scalar) *Scalar {
// Make a copy of z in case it aliases s.
zCopy := new(Scalar).Set(z)
return s.Multiply(x, y).Add(s, zCopy)
}
// Add sets s = x + y mod l, and returns s.
func (s *Scalar) Add(x, y *Scalar) *Scalar {
// s = 1 * x + y mod l
fiatScalarAdd(&s.s, &x.s, &y.s)
return s
}
// Subtract sets s = x - y mod l, and returns s.
func (s *Scalar) Subtract(x, y *Scalar) *Scalar {
// s = -1 * y + x mod l
fiatScalarSub(&s.s, &x.s, &y.s)
return s
}
// Negate sets s = -x mod l, and returns s.
func (s *Scalar) Negate(x *Scalar) *Scalar {
// s = -1 * x + 0 mod l
fiatScalarOpp(&s.s, &x.s)
return s
}
// Multiply sets s = x * y mod l, and returns s.
func (s *Scalar) Multiply(x, y *Scalar) *Scalar {
// s = x * y + 0 mod l
fiatScalarMul(&s.s, &x.s, &y.s)
return s
}
// Set sets s = x, and returns s.
func (s *Scalar) Set(x *Scalar) *Scalar {
*s = *x
return s
}
// SetUniformBytes sets s = x mod l, where x is a 64-byte little-endian integer.
// If x is not of the right length, SetUniformBytes returns nil and an error,
// and the receiver is unchanged.
//
// SetUniformBytes can be used to set s to a uniformly distributed value given
// 64 uniformly distributed random bytes.
func (s *Scalar) SetUniformBytes(x []byte) (*Scalar, error) {
if len(x) != 64 {
return nil, errors.New("edwards25519: invalid SetUniformBytes input length")
}
// We have a value x of 512 bits, but our fiatScalarFromBytes function
// expects an input lower than l, which is a little over 252 bits.
//
// Instead of writing a reduction function that operates on wider inputs, we
// can interpret x as the sum of three shorter values a, b, and c.
//
// x = a + b * 2^168 + c * 2^336 mod l
//
// We then precompute 2^168 and 2^336 modulo l, and perform the reduction
// with two multiplications and two additions.
s.setShortBytes(x[:21])
t := new(Scalar).setShortBytes(x[21:42])
s.Add(s, t.Multiply(t, scalarTwo168))
t.setShortBytes(x[42:])
s.Add(s, t.Multiply(t, scalarTwo336))
return s, nil
}
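The split can be checked against a math/big reference: 21 bytes cover 168 bits, so x[:21], x[21:42], and x[42:] carry weights 1, 2¹⁶⁸, and 2³³⁶. A sketch comparing SetUniformBytes with a direct reduction mod l:

```go
package main

import (
	"bytes"
	"crypto/rand"
	"fmt"
	"math/big"

	"filippo.io/edwards25519"
)

func reverse(b []byte) []byte {
	r := make([]byte, len(b))
	for i := range b {
		r[i] = b[len(b)-1-i]
	}
	return r
}

func main() {
	x := make([]byte, 64)
	rand.Read(x)

	s, _ := edwards25519.NewScalar().SetUniformBytes(x)

	// l = 2^252 + 27742317777372353535851937790883648493
	l, _ := new(big.Int).SetString("27742317777372353535851937790883648493", 10)
	l.Add(l, new(big.Int).Lsh(big.NewInt(1), 252))

	n := new(big.Int).SetBytes(reverse(x))
	n.Mod(n, l)
	want := make([]byte, 32)
	n.FillBytes(want)

	fmt.Println(bytes.Equal(s.Bytes(), reverse(want))) // true
}
```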
// scalarTwo168 and scalarTwo336 are 2^168 and 2^336 modulo l, encoded as a
// fiatScalarMontgomeryDomainFieldElement, which is a little-endian 4-limb value
// in the 2^256 Montgomery domain.
var scalarTwo168 = &Scalar{s: [4]uint64{0x5b8ab432eac74798, 0x38afddd6de59d5d7,
0xa2c131b399411b7c, 0x6329a7ed9ce5a30}}
var scalarTwo336 = &Scalar{s: [4]uint64{0xbd3d108e2b35ecc5, 0x5c3a3718bdf9c90b,
0x63aa97a331b4f2ee, 0x3d217f5be65cb5c}}
// setShortBytes sets s = x mod l, where x is a little-endian integer shorter
// than 32 bytes.
func (s *Scalar) setShortBytes(x []byte) *Scalar {
if len(x) >= 32 {
panic("edwards25519: internal error: setShortBytes called with a long string")
}
var buf [32]byte
copy(buf[:], x)
fiatScalarFromBytes((*[4]uint64)(&s.s), &buf)
fiatScalarToMontgomery(&s.s, (*fiatScalarNonMontgomeryDomainFieldElement)(&s.s))
return s
}
// SetCanonicalBytes sets s = x, where x is a 32-byte little-endian encoding of
// s, and returns s. If x is not a canonical encoding of s, SetCanonicalBytes
// returns nil and an error, and the receiver is unchanged.
func (s *Scalar) SetCanonicalBytes(x []byte) (*Scalar, error) {
if len(x) != 32 {
return nil, errors.New("invalid scalar length")
}
if !isReduced(x) {
return nil, errors.New("invalid scalar encoding")
}
fiatScalarFromBytes((*[4]uint64)(&s.s), (*[32]byte)(x))
fiatScalarToMontgomery(&s.s, (*fiatScalarNonMontgomeryDomainFieldElement)(&s.s))
return s, nil
}
// scalarMinusOneBytes is l - 1 in little endian.
var scalarMinusOneBytes = [32]byte{236, 211, 245, 92, 26, 99, 18, 88, 214, 156, 247, 162, 222, 249, 222, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16}
// isReduced returns whether the given scalar in 32-byte little endian encoded
// form is reduced modulo l.
func isReduced(s []byte) bool {
if len(s) != 32 {
return false
}
for i := len(s) - 1; i >= 0; i-- {
switch {
case s[i] > scalarMinusOneBytes[i]:
return false
case s[i] < scalarMinusOneBytes[i]:
return true
}
}
return true
}
// SetBytesWithClamping applies the buffer pruning described in RFC 8032,
// Section 5.1.5 (also known as clamping) and sets s to the result. The input
// must be 32 bytes, and it is not modified. If x is not of the right length,
// SetBytesWithClamping returns nil and an error, and the receiver is unchanged.
//
// Note that since Scalar values are always reduced modulo the prime order of
// the curve, the resulting value will not preserve any of the cofactor-clearing
// properties that clamping is meant to provide. It will however work as
// expected as long as it is applied to points on the prime order subgroup, like
// in Ed25519. In fact, it is lost to history why RFC 8032 adopted the
// irrelevant RFC 7748 clamping, but it is now required for compatibility.
func (s *Scalar) SetBytesWithClamping(x []byte) (*Scalar, error) {
// The description above omits the purpose of the high bits of the clamping
// for brevity, but those are also lost to reductions, and are also
// irrelevant to edwards25519 as they protect against a specific
// implementation bug that was once observed in a generic Montgomery ladder.
if len(x) != 32 {
return nil, errors.New("edwards25519: invalid SetBytesWithClamping input length")
}
// We need to use the wide reduction from SetUniformBytes, since clamping
// sets the 2^254 bit, making the value higher than the order.
var wideBytes [64]byte
copy(wideBytes[:], x[:])
wideBytes[0] &= 248
wideBytes[31] &= 63
wideBytes[31] |= 64
return s.SetUniformBytes(wideBytes[:])
}
// Bytes returns the canonical 32-byte little-endian encoding of s.
func (s *Scalar) Bytes() []byte {
// This function is outlined to make the allocations inline in the caller
// rather than happen on the heap.
var encoded [32]byte
return s.bytes(&encoded)
}
func (s *Scalar) bytes(out *[32]byte) []byte {
var ss fiatScalarNonMontgomeryDomainFieldElement
fiatScalarFromMontgomery(&ss, &s.s)
fiatScalarToBytes(out, (*[4]uint64)(&ss))
return out[:]
}
// Equal returns 1 if s and t are equal, and 0 otherwise.
func (s *Scalar) Equal(t *Scalar) int {
var diff fiatScalarMontgomeryDomainFieldElement
fiatScalarSub(&diff, &s.s, &t.s)
var nonzero uint64
fiatScalarNonzero(&nonzero, (*[4]uint64)(&diff))
nonzero |= nonzero >> 32
nonzero |= nonzero >> 16
nonzero |= nonzero >> 8
nonzero |= nonzero >> 4
nonzero |= nonzero >> 2
nonzero |= nonzero >> 1
return int(^nonzero) & 1
}
// nonAdjacentForm computes a width-w non-adjacent form for this scalar.
//
// w must be between 2 and 8, or nonAdjacentForm will panic.
func (s *Scalar) nonAdjacentForm(w uint) [256]int8 {
// This implementation is adapted from the one
// in curve25519-dalek and is documented there:
// https://github.com/dalek-cryptography/curve25519-dalek/blob/f630041af28e9a405255f98a8a93adca18e4315b/src/scalar.rs#L800-L871
b := s.Bytes()
if b[31] > 127 {
panic("scalar has high bit set illegally")
}
if w < 2 {
panic("w must be at least 2 by the definition of NAF")
} else if w > 8 {
panic("NAF digits must fit in int8")
}
var naf [256]int8
var digits [5]uint64
for i := 0; i < 4; i++ {
digits[i] = binary.LittleEndian.Uint64(b[i*8:])
}
width := uint64(1 << w)
windowMask := uint64(width - 1)
pos := uint(0)
carry := uint64(0)
for pos < 256 {
indexU64 := pos / 64
indexBit := pos % 64
var bitBuf uint64
if indexBit < 64-w {
// This window's bits are contained in a single u64
bitBuf = digits[indexU64] >> indexBit
} else {
// Combine the current 64 bits with bits from the next 64
bitBuf = (digits[indexU64] >> indexBit) | (digits[1+indexU64] << (64 - indexBit))
}
// Add carry into the current window
window := carry + (bitBuf & windowMask)
if window&1 == 0 {
// If the window value is even, preserve the carry and continue.
// Why is the carry preserved?
// If carry == 0 and window & 1 == 0,
// then the next carry should be 0
// If carry == 1 and window & 1 == 0,
// then bit_buf & 1 == 1 so the next carry should be 1
pos += 1
continue
}
if window < width/2 {
carry = 0
naf[pos] = int8(window)
} else {
carry = 1
naf[pos] = int8(window) - int8(width)
}
pos += w
}
return naf
}
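For intuition, here is a self-contained width-w NAF of a small integer, using the same window/carry rule as above (an editorial sketch, not the vendored code):

```go
package main

import "fmt"

// naf returns width-w NAF digits of n, least significant first:
// n == sum(digits[i] * 2^i), every nonzero digit is odd, and
// each digit satisfies |digit| < 2^(w-1).
func naf(n uint64, w uint) []int64 {
	width := int64(1) << w
	var digits []int64
	for n != 0 {
		var d int64
		if n&1 == 1 {
			d = int64(n) & (width - 1)
			if d >= width/2 {
				d -= width
			}
			n = uint64(int64(n) - d)
		}
		digits = append(digits, d)
		n >>= 1
	}
	return digits
}

func main() {
	fmt.Println(naf(7, 2)) // [-1 0 0 1], i.e. 7 = -1 + 8
}
```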
func (s *Scalar) signedRadix16() [64]int8 {
b := s.Bytes()
if b[31] > 127 {
panic("scalar has high bit set illegally")
}
var digits [64]int8
// Compute unsigned radix-16 digits:
for i := 0; i < 32; i++ {
digits[2*i] = int8(b[i] & 15)
digits[2*i+1] = int8((b[i] >> 4) & 15)
}
// Recenter coefficients:
for i := 0; i < 63; i++ {
carry := (digits[i] + 8) >> 4
digits[i] -= carry << 4
digits[i+1] += carry
}
return digits
}
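A worked example of the recentering step: 255 has unsigned radix-16 digits [15, 15], which recenter to [-1, 0, 1] since 255 = −1 + 0·16 + 1·256 (editorial sketch):

```go
package main

import "fmt"

func main() {
	// Unsigned radix-16 digits of 255, with room for the final carry.
	digits := []int8{15, 15, 0}
	// The same recentering loop as signedRadix16.
	for i := 0; i < len(digits)-1; i++ {
		carry := (digits[i] + 8) >> 4
		digits[i] -= carry << 4
		digits[i+1] += carry
	}
	fmt.Println(digits) // [-1 0 1]
}
```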

vendor/filippo.io/edwards25519/scalar_fiat.go generated vendored Normal file

File diff suppressed because it is too large

vendor/filippo.io/edwards25519/scalarmult.go generated vendored Normal file

@ -0,0 +1,214 @@
// Copyright (c) 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package edwards25519
import "sync"
// basepointTable is a set of 32 affineLookupTables, where table i is generated
// from 256i * basepoint. It is precomputed the first time it's used.
func basepointTable() *[32]affineLookupTable {
basepointTablePrecomp.initOnce.Do(func() {
p := NewGeneratorPoint()
for i := 0; i < 32; i++ {
basepointTablePrecomp.table[i].FromP3(p)
for j := 0; j < 8; j++ {
p.Add(p, p)
}
}
})
return &basepointTablePrecomp.table
}
var basepointTablePrecomp struct {
table [32]affineLookupTable
initOnce sync.Once
}
// ScalarBaseMult sets v = x * B, where B is the canonical generator, and
// returns v.
//
// The scalar multiplication is done in constant time.
func (v *Point) ScalarBaseMult(x *Scalar) *Point {
basepointTable := basepointTable()
// Write x = sum(x_i * 16^i) so x*B = sum( B*x_i*16^i )
// as described in the Ed25519 paper
//
// Group even and odd coefficients
// x*B = x_0*16^0*B + x_2*16^2*B + ... + x_62*16^62*B
// + x_1*16^1*B + x_3*16^3*B + ... + x_63*16^63*B
// x*B = x_0*16^0*B + x_2*16^2*B + ... + x_62*16^62*B
// + 16*( x_1*16^0*B + x_3*16^2*B + ... + x_63*16^62*B)
//
// We use a lookup table for each i to get x_i*16^(2*i)*B
// and do four doublings to multiply by 16.
digits := x.signedRadix16()
multiple := &affineCached{}
tmp1 := &projP1xP1{}
tmp2 := &projP2{}
// Accumulate the odd components first
v.Set(NewIdentityPoint())
for i := 1; i < 64; i += 2 {
basepointTable[i/2].SelectInto(multiple, digits[i])
tmp1.AddAffine(v, multiple)
v.fromP1xP1(tmp1)
}
// Multiply by 16
tmp2.FromP3(v) // tmp2 = v in P2 coords
tmp1.Double(tmp2) // tmp1 = 2*v in P1xP1 coords
tmp2.FromP1xP1(tmp1) // tmp2 = 2*v in P2 coords
tmp1.Double(tmp2) // tmp1 = 4*v in P1xP1 coords
tmp2.FromP1xP1(tmp1) // tmp2 = 4*v in P2 coords
tmp1.Double(tmp2) // tmp1 = 8*v in P1xP1 coords
tmp2.FromP1xP1(tmp1) // tmp2 = 8*v in P2 coords
tmp1.Double(tmp2) // tmp1 = 16*v in P1xP1 coords
v.fromP1xP1(tmp1) // now v = 16*(odd components)
// Accumulate the even components
for i := 0; i < 64; i += 2 {
basepointTable[i/2].SelectInto(multiple, digits[i])
tmp1.AddAffine(v, multiple)
v.fromP1xP1(tmp1)
}
return v
}
// ScalarMult sets v = x * q, and returns v.
//
// The scalar multiplication is done in constant time.
func (v *Point) ScalarMult(x *Scalar, q *Point) *Point {
checkInitialized(q)
var table projLookupTable
table.FromP3(q)
// Write x = sum(x_i * 16^i)
// so x*Q = sum( Q*x_i*16^i )
// = Q*x_0 + 16*(Q*x_1 + 16*( ... + Q*x_63) ... )
// <------compute inside out---------
//
// We use the lookup table to get the x_i*Q values
// and do four doublings to compute 16*Q
digits := x.signedRadix16()
// Unwrap first loop iteration to save computing 16*identity
multiple := &projCached{}
tmp1 := &projP1xP1{}
tmp2 := &projP2{}
table.SelectInto(multiple, digits[63])
v.Set(NewIdentityPoint())
tmp1.Add(v, multiple) // tmp1 = x_63*Q in P1xP1 coords
for i := 62; i >= 0; i-- {
tmp2.FromP1xP1(tmp1) // tmp2 = (prev) in P2 coords
tmp1.Double(tmp2) // tmp1 = 2*(prev) in P1xP1 coords
tmp2.FromP1xP1(tmp1) // tmp2 = 2*(prev) in P2 coords
tmp1.Double(tmp2) // tmp1 = 4*(prev) in P1xP1 coords
tmp2.FromP1xP1(tmp1) // tmp2 = 4*(prev) in P2 coords
tmp1.Double(tmp2) // tmp1 = 8*(prev) in P1xP1 coords
tmp2.FromP1xP1(tmp1) // tmp2 = 8*(prev) in P2 coords
tmp1.Double(tmp2) // tmp1 = 16*(prev) in P1xP1 coords
v.fromP1xP1(tmp1) // v = 16*(prev) in P3 coords
table.SelectInto(multiple, digits[i])
tmp1.Add(v, multiple) // tmp1 = x_i*Q + 16*(prev) in P1xP1 coords
}
v.fromP1xP1(tmp1)
return v
}
// basepointNafTable is the nafLookupTable8 for the basepoint.
// It is precomputed the first time it's used.
func basepointNafTable() *nafLookupTable8 {
basepointNafTablePrecomp.initOnce.Do(func() {
basepointNafTablePrecomp.table.FromP3(NewGeneratorPoint())
})
return &basepointNafTablePrecomp.table
}
var basepointNafTablePrecomp struct {
table nafLookupTable8
initOnce sync.Once
}
// VarTimeDoubleScalarBaseMult sets v = a * A + b * B, where B is the canonical
// generator, and returns v.
//
// Execution time depends on the inputs.
func (v *Point) VarTimeDoubleScalarBaseMult(a *Scalar, A *Point, b *Scalar) *Point {
checkInitialized(A)
// Similarly to the single variable-base approach, we compute
// digits and use them with a lookup table. However, because
// we are allowed to do variable-time operations, we don't
// need constant-time lookups or constant-time digit
// computations.
//
// So we use a non-adjacent form of some width w instead of
// radix 16. This is like a binary representation (one digit
// for each binary place) but we allow the digits to grow in
// magnitude up to 2^{w-1} so that the nonzero digits are as
// sparse as possible. Intuitively, this "condenses" the
// "mass" of the scalar onto sparse coefficients (meaning
// fewer additions).
basepointNafTable := basepointNafTable()
var aTable nafLookupTable5
aTable.FromP3(A)
// Because the basepoint is fixed, we can use a wider NAF
// corresponding to a bigger table.
aNaf := a.nonAdjacentForm(5)
bNaf := b.nonAdjacentForm(8)
// Find the first nonzero coefficient.
i := 255
for j := i; j >= 0; j-- {
if aNaf[j] != 0 || bNaf[j] != 0 {
break
}
}
multA := &projCached{}
multB := &affineCached{}
tmp1 := &projP1xP1{}
tmp2 := &projP2{}
tmp2.Zero()
// Move from high to low bits, doubling the accumulator
// at each iteration and checking whether there is a nonzero
// coefficient to look up a multiple of.
for ; i >= 0; i-- {
tmp1.Double(tmp2)
// Only update v if we have a nonzero coeff to add in.
if aNaf[i] > 0 {
v.fromP1xP1(tmp1)
aTable.SelectInto(multA, aNaf[i])
tmp1.Add(v, multA)
} else if aNaf[i] < 0 {
v.fromP1xP1(tmp1)
aTable.SelectInto(multA, -aNaf[i])
tmp1.Sub(v, multA)
}
if bNaf[i] > 0 {
v.fromP1xP1(tmp1)
basepointNafTable.SelectInto(multB, bNaf[i])
tmp1.AddAffine(v, multB)
} else if bNaf[i] < 0 {
v.fromP1xP1(tmp1)
basepointNafTable.SelectInto(multB, -bNaf[i])
tmp1.SubAffine(v, multB)
}
tmp2.FromP1xP1(tmp1)
}
v.fromP2(tmp2)
return v
}
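A cheap sanity check of the double-scalar API: a·B + b·B must equal (a+b)·B. A usage sketch with small canonical scalars (it assumes Point.Equal, which is not shown in this diff):

```go
package main

import (
	"fmt"

	"filippo.io/edwards25519"
)

// le32 builds the 32-byte little-endian encoding of a small scalar.
func le32(x byte) []byte {
	b := make([]byte, 32)
	b[0] = x
	return b
}

func main() {
	a, _ := edwards25519.NewScalar().SetCanonicalBytes(le32(3))
	b, _ := edwards25519.NewScalar().SetCanonicalBytes(le32(5))
	B := edwards25519.NewGeneratorPoint()

	got := new(edwards25519.Point).VarTimeDoubleScalarBaseMult(a, B, b)

	sum := edwards25519.NewScalar().Add(a, b)
	want := new(edwards25519.Point).ScalarBaseMult(sum)

	fmt.Println(got.Equal(want) == 1) // true
}
```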

vendor/filippo.io/edwards25519/tables.go generated vendored Normal file

@ -0,0 +1,129 @@
// Copyright (c) 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package edwards25519
import (
"crypto/subtle"
)
// A dynamic lookup table for variable-base, constant-time scalar muls.
type projLookupTable struct {
points [8]projCached
}
// A precomputed lookup table for fixed-base, constant-time scalar muls.
type affineLookupTable struct {
points [8]affineCached
}
// A dynamic lookup table for variable-base, variable-time scalar muls.
type nafLookupTable5 struct {
points [8]projCached
}
// A precomputed lookup table for fixed-base, variable-time scalar muls.
type nafLookupTable8 struct {
points [64]affineCached
}
// Constructors.
// Builds a lookup table at runtime. Fast.
func (v *projLookupTable) FromP3(q *Point) {
// Goal: v.points[i] = (i+1)*Q, i.e., Q, 2Q, ..., 8Q
// This allows lookup of -8Q, ..., -Q, 0, Q, ..., 8Q
v.points[0].FromP3(q)
tmpP3 := Point{}
tmpP1xP1 := projP1xP1{}
for i := 0; i < 7; i++ {
// Compute (i+1)*Q as Q + i*Q and convert to a projCached
// This is needlessly complicated because the API has explicit
// receivers instead of creating stack objects and relying on RVO
v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.Add(q, &v.points[i])))
}
}
// This is not optimised for speed; fixed-base tables should be precomputed.
func (v *affineLookupTable) FromP3(q *Point) {
// Goal: v.points[i] = (i+1)*Q, i.e., Q, 2Q, ..., 8Q
// This allows lookup of -8Q, ..., -Q, 0, Q, ..., 8Q
v.points[0].FromP3(q)
tmpP3 := Point{}
tmpP1xP1 := projP1xP1{}
for i := 0; i < 7; i++ {
// Compute (i+1)*Q as Q + i*Q and convert to affineCached
v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.AddAffine(q, &v.points[i])))
}
}
// Builds a lookup table at runtime. Fast.
func (v *nafLookupTable5) FromP3(q *Point) {
// Goal: v.points[i] = (2*i+1)*Q, i.e., Q, 3Q, 5Q, ..., 15Q
// This allows lookup of -15Q, ..., -3Q, -Q, 0, Q, 3Q, ..., 15Q
v.points[0].FromP3(q)
q2 := Point{}
q2.Add(q, q)
tmpP3 := Point{}
tmpP1xP1 := projP1xP1{}
for i := 0; i < 7; i++ {
v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.Add(&q2, &v.points[i])))
}
}
// This is not optimised for speed; fixed-base tables should be precomputed.
func (v *nafLookupTable8) FromP3(q *Point) {
v.points[0].FromP3(q)
q2 := Point{}
q2.Add(q, q)
tmpP3 := Point{}
tmpP1xP1 := projP1xP1{}
for i := 0; i < 63; i++ {
v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.AddAffine(&q2, &v.points[i])))
}
}
// Selectors.
// Set dest to x*Q, where -8 <= x <= 8, in constant time.
func (v *projLookupTable) SelectInto(dest *projCached, x int8) {
// Compute xabs = |x|
xmask := x >> 7
xabs := uint8((x + xmask) ^ xmask)
dest.Zero()
for j := 1; j <= 8; j++ {
// Set dest = j*Q if |x| = j
cond := subtle.ConstantTimeByteEq(xabs, uint8(j))
dest.Select(&v.points[j-1], dest, cond)
}
// Now dest = |x|*Q, conditionally negate to get x*Q
dest.CondNeg(int(xmask & 1))
}
// Set dest to x*Q, where -8 <= x <= 8, in constant time.
func (v *affineLookupTable) SelectInto(dest *affineCached, x int8) {
// Compute xabs = |x|
xmask := x >> 7
xabs := uint8((x + xmask) ^ xmask)
dest.Zero()
for j := 1; j <= 8; j++ {
// Set dest = j*Q if |x| = j
cond := subtle.ConstantTimeByteEq(xabs, uint8(j))
dest.Select(&v.points[j-1], dest, cond)
}
// Now dest = |x|*Q, conditionally negate to get x*Q
dest.CondNeg(int(xmask & 1))
}
// Given odd x with 0 < x < 2^4, return x*Q (in variable time).
func (v *nafLookupTable5) SelectInto(dest *projCached, x int8) {
*dest = v.points[x/2]
}
// Given odd x with 0 < x < 2^7, return x*Q (in variable time).
func (v *nafLookupTable8) SelectInto(dest *affineCached, x int8) {
*dest = v.points[x/2]
}


@ -13,7 +13,7 @@ import (
"sync"
"time"
"github.com/google/go-github/v57/github"
"github.com/google/go-github/v60/github"
)
const (


@ -70,3 +70,5 @@ benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
- [FreeCache](https://github.com/coocood/freecache)
- [FastCache](https://github.com/VictoriaMetrics/fastcache)
- [Ristretto](https://github.com/dgraph-io/ristretto)
- [Badger](https://github.com/dgraph-io/badger)


@ -19,10 +19,13 @@ const (
// Store the primes in an array as well.
//
// The consts are used when possible in Go code to avoid MOVs but we need a
// contiguous array of the assembly code.
// contiguous array for the assembly code.
var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
// Digest implements hash.Hash64.
//
// Note that a zero-valued Digest is not ready to receive writes.
// Call Reset or create a Digest using New before calling other methods.
type Digest struct {
v1 uint64
v2 uint64
@ -33,19 +36,31 @@ type Digest struct {
n int // how much of mem is used
}
// New creates a new Digest that computes the 64-bit xxHash algorithm.
// New creates a new Digest with a zero seed.
func New() *Digest {
return NewWithSeed(0)
}
// NewWithSeed creates a new Digest with the given seed.
func NewWithSeed(seed uint64) *Digest {
var d Digest
d.Reset()
d.ResetWithSeed(seed)
return &d
}
// Reset clears the Digest's state so that it can be reused.
// It uses a seed value of zero.
func (d *Digest) Reset() {
d.v1 = primes[0] + prime2
d.v2 = prime2
d.v3 = 0
d.v4 = -primes[0]
d.ResetWithSeed(0)
}
// ResetWithSeed clears the Digest's state so that it can be reused.
// It uses the given seed to initialize the state.
func (d *Digest) ResetWithSeed(seed uint64) {
d.v1 = seed + prime1 + prime2
d.v2 = seed + prime2
d.v3 = seed
d.v4 = seed - prime1
d.total = 0
d.n = 0
}
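With the seeded constructors, two digests computed with the same seed and input agree; a short usage sketch (assuming the usual github.com/cespare/xxhash/v2 import path for this vendored package):

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	d := xxhash.NewWithSeed(42)
	d.WriteString("hello")
	first := d.Sum64()

	d.ResetWithSeed(42) // reuse the Digest with the same seed
	d.WriteString("hello")
	fmt.Println(first == d.Sum64()) // true
}
```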


@ -6,7 +6,7 @@
package xxhash
// Sum64 computes the 64-bit xxHash digest of b.
// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
//
//go:noescape
func Sum64(b []byte) uint64


@ -3,7 +3,7 @@
package xxhash
// Sum64 computes the 64-bit xxHash digest of b.
// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
func Sum64(b []byte) uint64 {
// A simpler version would be
// d := New()


@ -5,7 +5,7 @@
package xxhash
// Sum64String computes the 64-bit xxHash digest of s.
// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
func Sum64String(s string) uint64 {
return Sum64([]byte(s))
}


@ -33,7 +33,7 @@ import (
//
// See https://github.com/golang/go/issues/42739 for discussion.
// Sum64String computes the 64-bit xxHash digest of s.
// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
func Sum64String(s string) uint64 {
b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))


@ -91,11 +91,12 @@ logr design but also left out some parts and changed others:
| Adding a name to a logger | `WithName` | no API |
| Modify verbosity of log entries in a call chain | `V` | no API |
| Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` |
| Pass context for extracting additional values | no API | API variants like `InfoCtx` |
The high-level slog API is explicitly meant to be one of many different APIs
that can be layered on top of a shared `slog.Handler`. logr is one such
alternative API, with [interoperability](#slog-interoperability) provided by the [`slogr`](slogr)
package.
alternative API, with [interoperability](#slog-interoperability) provided by
some conversion functions.
### Inspiration
@ -145,24 +146,24 @@ There are implementations for the following logging libraries:
## slog interoperability
Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler`
and using the `slog.Logger` API with a `logr.LogSink`. [slogr](./slogr) provides `NewLogr` and
`NewSlogHandler` API calls to convert between a `logr.Logger` and a `slog.Handler`.
and using the `slog.Logger` API with a `logr.LogSink`. `FromSlogHandler` and
`ToSlogHandler` convert between a `logr.Logger` and a `slog.Handler`.
As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level
slog API. `slogr` itself leaves that to the caller.
slog API.
## Using a `logr.Sink` as backend for slog
### Using a `logr.LogSink` as backend for slog
Ideally, a logr sink implementation should support both logr and slog by
implementing both the normal logr interface(s) and `slogr.SlogSink`. Because
implementing both the normal logr interface(s) and `SlogSink`. Because
of a conflict in the parameters of the common `Enabled` method, it is [not
possible to implement both slog.Handler and logr.Sink in the same
type](https://github.com/golang/go/issues/59110).
If both are supported, log calls can go from the high-level APIs to the backend
without the need to convert parameters. `NewLogr` and `NewSlogHandler` can
without the need to convert parameters. `FromSlogHandler` and `ToSlogHandler` can
convert back and forth without adding additional wrappers, with one exception:
when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then
`NewSlogHandler` has to use a wrapper which adjusts the verbosity for future
`ToSlogHandler` has to use a wrapper which adjusts the verbosity for future
log calls.
Such an implementation should also support values that implement specific
@ -187,13 +188,13 @@ Not supporting slog has several drawbacks:
These drawbacks are severe enough that applications using a mixture of slog and
logr should switch to a different backend.
## Using a `slog.Handler` as backend for logr
### Using a `slog.Handler` as backend for logr
Using a plain `slog.Handler` without support for logr works better than the
other direction:
- All logr verbosity levels can be mapped 1:1 to their corresponding slog level
by negating them.
- Stack unwinding is done by the `slogr.SlogSink` and the resulting program
- Stack unwinding is done by the `SlogSink` and the resulting program
counter is passed to the `slog.Handler`.
- Names added via `Logger.WithName` are gathered and recorded in an additional
attribute with `logger` as key and the names separated by slash as value.
@ -205,27 +206,39 @@ ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility
with logr implementations without slog support is not important, then
`slog.Valuer` is sufficient.
## Context support for slog
### Context support for slog
Storing a logger in a `context.Context` is not supported by
slog. `logr.NewContext` and `logr.FromContext` can be used with slog like this
to fill this gap:
slog. `NewContextWithSlogLogger` and `FromContextAsSlogLogger` can be
used to fill this gap. They store and retrieve a `slog.Logger` pointer
under the same context key that is also used by `NewContext` and
`FromContext` for `logr.Logger` value.
func HandlerFromContext(ctx context.Context) slog.Handler {
logger, err := logr.FromContext(ctx)
if err == nil {
return slogr.NewSlogHandler(logger)
}
return slog.Default().Handler()
}
When `NewContextWithSlogLogger` is followed by `FromContext`, the latter will
automatically convert the `slog.Logger` to a
`logr.Logger`. `FromContextAsSlogLogger` does the same for the other direction.
func ContextWithHandler(ctx context.Context, handler slog.Handler) context.Context {
return logr.NewContext(ctx, slogr.NewLogr(handler))
}
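A minimal sketch of the replacement API described above (assumes Go ≥ 1.21 for log/slog):

```go
package main

import (
	"context"
	"log/slog"

	"github.com/go-logr/logr"
)

func main() {
	// Store a *slog.Logger; retrieve it either way.
	ctx := logr.NewContextWithSlogLogger(context.Background(), slog.Default())

	if logger, err := logr.FromContext(ctx); err == nil {
		logger.Info("via logr", "converted", true) // automatic conversion
	}
	if sl := logr.FromContextAsSlogLogger(ctx); sl != nil {
		sl.Info("via slog")
	}
}
```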
With this approach, binaries which use either slog or logr are as efficient as
possible with no unnecessary allocations. This is also why the API stores a
`slog.Logger` pointer: when storing a `slog.Handler`, creating a `slog.Logger`
on retrieval would need to allocate one.
The downside is that storing and retrieving a `slog.Handler` needs more
allocations compared to using a `logr.Logger`. Therefore the recommendation is
to use the `logr.Logger` API in code which uses contextual logging.
The downside is that switching back and forth needs more allocations. Because
logr is the API that is already in use by different packages, in particular
Kubernetes, the recommendation is to use the `logr.Logger` API in code which
uses contextual logging.
An alternative to adding values to a logger and storing that logger in the
context is to store the values in the context and to configure a logging
backend to extract those values when emitting log entries. This only works when
log calls are passed the context, which is not supported by the logr API.
With the slog API, it is possible, but not
required. https://github.com/veqryn/slog-context is a package for slog which
provides additional support code for this approach. It also contains wrappers
for the context functions in logr, so developers who prefer to not use the logr
APIs directly can use those instead and the resulting code will still be
interoperable with logr.
## FAQ

vendor/github.com/go-logr/logr/context.go generated vendored Normal file

@ -0,0 +1,33 @@
/*
Copyright 2023 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
// contextKey is how we find Loggers in a context.Context. With Go < 1.21,
// the value is always a Logger value. With Go >= 1.21, the value can be a
// Logger value or a slog.Logger pointer.
type contextKey struct{}
// notFoundError exists to carry an IsNotFound method.
type notFoundError struct{}
func (notFoundError) Error() string {
return "no logr.Logger was present"
}
func (notFoundError) IsNotFound() bool {
return true
}

vendor/github.com/go-logr/logr/context_noslog.go generated vendored Normal file

@ -0,0 +1,49 @@
//go:build !go1.21
// +build !go1.21
/*
Copyright 2019 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
import (
"context"
)
// FromContext returns a Logger from ctx or an error if no Logger is found.
func FromContext(ctx context.Context) (Logger, error) {
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
return v, nil
}
return Logger{}, notFoundError{}
}
// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
// returns a Logger that discards all log messages.
func FromContextOrDiscard(ctx context.Context) Logger {
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
return v
}
return Discard()
}
// NewContext returns a new Context, derived from ctx, which carries the
// provided Logger.
func NewContext(ctx context.Context, logger Logger) context.Context {
return context.WithValue(ctx, contextKey{}, logger)
}

vendor/github.com/go-logr/logr/context_slog.go generated vendored Normal file

@ -0,0 +1,83 @@
//go:build go1.21
// +build go1.21
/*
Copyright 2019 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
import (
"context"
"fmt"
"log/slog"
)
// FromContext returns a Logger from ctx or an error if no Logger is found.
func FromContext(ctx context.Context) (Logger, error) {
v := ctx.Value(contextKey{})
if v == nil {
return Logger{}, notFoundError{}
}
switch v := v.(type) {
case Logger:
return v, nil
case *slog.Logger:
return FromSlogHandler(v.Handler()), nil
default:
// Not reached.
panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
}
}
// FromContextAsSlogLogger returns a slog.Logger from ctx or nil if no such Logger is found.
func FromContextAsSlogLogger(ctx context.Context) *slog.Logger {
v := ctx.Value(contextKey{})
if v == nil {
return nil
}
switch v := v.(type) {
case Logger:
return slog.New(ToSlogHandler(v))
case *slog.Logger:
return v
default:
// Not reached.
panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
}
}
// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
// returns a Logger that discards all log messages.
func FromContextOrDiscard(ctx context.Context) Logger {
if logger, err := FromContext(ctx); err == nil {
return logger
}
return Discard()
}
// NewContext returns a new Context, derived from ctx, which carries the
// provided Logger.
func NewContext(ctx context.Context, logger Logger) context.Context {
return context.WithValue(ctx, contextKey{}, logger)
}
// NewContextWithSlogLogger returns a new Context, derived from ctx, which carries the
// provided slog.Logger.
func NewContextWithSlogLogger(ctx context.Context, logger *slog.Logger) context.Context {
return context.WithValue(ctx, contextKey{}, logger)
}
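As a sketch of the dual storage implemented here (assuming Go 1.21+), the same
context key serves both APIs, converting lazily on retrieval:

```go
package main

import (
	"context"
	"log/slog"
	"os"

	"github.com/go-logr/logr"
)

func main() {
	// Store a *slog.Logger under the shared context key...
	ctx := logr.NewContextWithSlogLogger(context.Background(),
		slog.New(slog.NewJSONHandler(os.Stdout, nil)))

	// ...and read it back through either API.
	logr.FromContextOrDiscard(ctx).Info("via logr")    // wrapped on the fly
	logr.FromContextAsSlogLogger(ctx).Info("via slog") // returned as stored
}
```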

View file

@ -100,6 +100,11 @@ type Options struct {
// details, see docs for Go's time.Layout.
TimestampFormat string
// LogInfoLevel tells funcr what key to use to log the info level.
// If not specified, the info level will be logged as "level".
// If this is set to "", the info level will not be logged at all.
LogInfoLevel *string
// Verbosity tells funcr which V logs to produce. Higher values enable
// more logs. Info logs at or below this level will be written, while logs
// above this level will be discarded.
@ -213,6 +218,10 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter {
if opts.MaxLogDepth == 0 {
opts.MaxLogDepth = defaultMaxLogDepth
}
if opts.LogInfoLevel == nil {
opts.LogInfoLevel = new(string)
*opts.LogInfoLevel = "level"
}
f := Formatter{
outputFormat: outfmt,
prefix: "",
@ -231,8 +240,11 @@ type Formatter struct {
prefix string
values []any
valuesStr string
parentValuesStr string
depth int
opts *Options
group string // for slog groups
groupDepth int
}
// outputFormat indicates which outputFormat to use.
@ -253,33 +265,62 @@ func (f Formatter) render(builtins, args []any) string {
// Empirically bytes.Buffer is faster than strings.Builder for this.
buf := bytes.NewBuffer(make([]byte, 0, 1024))
if f.outputFormat == outputJSON {
buf.WriteByte('{')
buf.WriteByte('{') // for the whole line
}
vals := builtins
if hook := f.opts.RenderBuiltinsHook; hook != nil {
vals = hook(f.sanitize(vals))
}
f.flatten(buf, vals, false, false) // keys are ours, no need to escape
continuing := len(builtins) > 0
if len(f.valuesStr) > 0 {
if f.parentValuesStr != "" {
if continuing {
if f.outputFormat == outputJSON {
buf.WriteByte(',')
} else {
buf.WriteByte(' ')
}
buf.WriteByte(f.comma())
}
buf.WriteString(f.parentValuesStr)
continuing = true
buf.WriteString(f.valuesStr)
}
groupDepth := f.groupDepth
if f.group != "" {
if f.valuesStr != "" || len(args) != 0 {
if continuing {
buf.WriteByte(f.comma())
}
buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
buf.WriteByte(f.colon())
buf.WriteByte('{') // for the group
continuing = false
} else {
// The group was empty
groupDepth--
}
}
if f.valuesStr != "" {
if continuing {
buf.WriteByte(f.comma())
}
buf.WriteString(f.valuesStr)
continuing = true
}
vals = args
if hook := f.opts.RenderArgsHook; hook != nil {
vals = hook(f.sanitize(vals))
}
f.flatten(buf, vals, continuing, true) // escape user-provided keys
if f.outputFormat == outputJSON {
buf.WriteByte('}')
for i := 0; i < groupDepth; i++ {
buf.WriteByte('}') // for the groups
}
if f.outputFormat == outputJSON {
buf.WriteByte('}') // for the whole line
}
return buf.String()
}
@ -298,9 +339,16 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc
if len(kvList)%2 != 0 {
kvList = append(kvList, noValue)
}
copied := false
for i := 0; i < len(kvList); i += 2 {
k, ok := kvList[i].(string)
if !ok {
if !copied {
newList := make([]any, len(kvList))
copy(newList, kvList)
kvList = newList
copied = true
}
k = f.nonStringKey(kvList[i])
kvList[i] = k
}
@ -308,7 +356,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc
if i > 0 || continuing {
if f.outputFormat == outputJSON {
buf.WriteByte(',')
buf.WriteByte(f.comma())
} else {
// In theory the format could be something we don't understand. In
// practice, we control it, so it won't be.
@ -316,24 +364,35 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc
}
}
if escapeKeys {
buf.WriteString(prettyString(k))
} else {
// this is faster
buf.WriteByte('"')
buf.WriteString(k)
buf.WriteByte('"')
}
if f.outputFormat == outputJSON {
buf.WriteByte(':')
} else {
buf.WriteByte('=')
}
buf.WriteString(f.quoted(k, escapeKeys))
buf.WriteByte(f.colon())
buf.WriteString(f.pretty(v))
}
return kvList
}
func (f Formatter) quoted(str string, escape bool) string {
if escape {
return prettyString(str)
}
// this is faster
return `"` + str + `"`
}
func (f Formatter) comma() byte {
if f.outputFormat == outputJSON {
return ','
}
return ' '
}
func (f Formatter) colon() byte {
if f.outputFormat == outputJSON {
return ':'
}
return '='
}
func (f Formatter) pretty(value any) string {
return f.prettyWithFlags(value, 0, 0)
}
@ -407,12 +466,12 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
}
for i := 0; i < len(v); i += 2 {
if i > 0 {
buf.WriteByte(',')
buf.WriteByte(f.comma())
}
k, _ := v[i].(string) // sanitize() above means no need to check success
// arbitrary keys might need escaping
buf.WriteString(prettyString(k))
buf.WriteByte(':')
buf.WriteByte(f.colon())
buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
}
if flags&flagRawStruct == 0 {
@ -481,7 +540,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
continue
}
if printComma {
buf.WriteByte(',')
buf.WriteByte(f.comma())
}
printComma = true // if we got here, we are rendering a field
if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
@ -492,10 +551,8 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
name = fld.Name
}
// field names can't contain characters which need escaping
buf.WriteByte('"')
buf.WriteString(name)
buf.WriteByte('"')
buf.WriteByte(':')
buf.WriteString(f.quoted(name, false))
buf.WriteByte(f.colon())
buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
}
if flags&flagRawStruct == 0 {
@ -520,7 +577,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
buf.WriteByte('[')
for i := 0; i < v.Len(); i++ {
if i > 0 {
buf.WriteByte(',')
buf.WriteByte(f.comma())
}
e := v.Index(i)
buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
@ -534,7 +591,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
i := 0
for it.Next() {
if i > 0 {
buf.WriteByte(',')
buf.WriteByte(f.comma())
}
// If a map key supports TextMarshaler, use it.
keystr := ""
@ -556,7 +613,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
}
}
buf.WriteString(keystr)
buf.WriteByte(':')
buf.WriteByte(f.colon())
buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
i++
}
@ -706,6 +763,53 @@ func (f Formatter) sanitize(kvList []any) []any {
return kvList
}
// startGroup opens a new group scope (basically a sub-struct), which locks all
// the current saved values and starts them anew. This is needed to satisfy
// slog.
func (f *Formatter) startGroup(group string) {
// Unnamed groups are just inlined.
if group == "" {
return
}
// Any saved values can no longer be changed.
buf := bytes.NewBuffer(make([]byte, 0, 1024))
continuing := false
if f.parentValuesStr != "" {
buf.WriteString(f.parentValuesStr)
continuing = true
}
if f.group != "" && f.valuesStr != "" {
if continuing {
buf.WriteByte(f.comma())
}
buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
buf.WriteByte(f.colon())
buf.WriteByte('{') // for the group
continuing = false
}
if f.valuesStr != "" {
if continuing {
buf.WriteByte(f.comma())
}
buf.WriteString(f.valuesStr)
}
// NOTE: We don't close the scope here - that's done later, when a log line
// is actually rendered (because we have N scopes to close).
f.parentValuesStr = buf.String()
// Start collecting new values.
f.group = group
f.groupDepth++
f.valuesStr = ""
f.values = nil
}
// Init configures this Formatter from runtime info, such as the call depth
// imposed by logr itself.
// Note that this receiver is a pointer, so depth can be saved.
@ -740,7 +844,10 @@ func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, args
if policy := f.opts.LogCaller; policy == All || policy == Info {
args = append(args, "caller", f.caller())
}
args = append(args, "level", level, "msg", msg)
if key := *f.opts.LogInfoLevel; key != "" {
args = append(args, key, level)
}
args = append(args, "msg", msg)
return prefix, f.render(args, kvList)
}
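A short sketch of the new `LogInfoLevel` option in use (`funcr.New` is the
package's public constructor; the output comment is approximate):

```go
package main

import (
	"fmt"

	"github.com/go-logr/logr/funcr"
)

func main() {
	// Log the verbosity under "v" instead of the default "level";
	// an explicit empty string would suppress the key entirely.
	key := "v"
	log := funcr.New(func(prefix, args string) {
		fmt.Println(prefix, args)
	}, funcr.Options{LogInfoLevel: &key})

	log.Info("hello") // prints roughly: "v"=0 "msg"="hello"
}
```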

105
vendor/github.com/go-logr/logr/funcr/slogsink.go generated vendored Normal file
View file

@ -0,0 +1,105 @@
//go:build go1.21
// +build go1.21
/*
Copyright 2023 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package funcr
import (
"context"
"log/slog"
"github.com/go-logr/logr"
)
var _ logr.SlogSink = &fnlogger{}
const extraSlogSinkDepth = 3 // 2 for slog, 1 for SlogSink
func (l fnlogger) Handle(_ context.Context, record slog.Record) error {
kvList := make([]any, 0, 2*record.NumAttrs())
record.Attrs(func(attr slog.Attr) bool {
kvList = attrToKVs(attr, kvList)
return true
})
if record.Level >= slog.LevelError {
l.WithCallDepth(extraSlogSinkDepth).Error(nil, record.Message, kvList...)
} else {
level := l.levelFromSlog(record.Level)
l.WithCallDepth(extraSlogSinkDepth).Info(level, record.Message, kvList...)
}
return nil
}
func (l fnlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink {
kvList := make([]any, 0, 2*len(attrs))
for _, attr := range attrs {
kvList = attrToKVs(attr, kvList)
}
l.AddValues(kvList)
return &l
}
func (l fnlogger) WithGroup(name string) logr.SlogSink {
l.startGroup(name)
return &l
}
// attrToKVs appends a slog.Attr to a logr-style kvList. It handles slog Groups
// and other details of slog.
func attrToKVs(attr slog.Attr, kvList []any) []any {
attrVal := attr.Value.Resolve()
if attrVal.Kind() == slog.KindGroup {
groupVal := attrVal.Group()
grpKVs := make([]any, 0, 2*len(groupVal))
for _, attr := range groupVal {
grpKVs = attrToKVs(attr, grpKVs)
}
if attr.Key == "" {
// slog says we have to inline these
kvList = append(kvList, grpKVs...)
} else {
kvList = append(kvList, attr.Key, PseudoStruct(grpKVs))
}
} else if attr.Key != "" {
kvList = append(kvList, attr.Key, attrVal.Any())
}
return kvList
}
// levelFromSlog adjusts the level by the logger's verbosity and negates it.
// It ensures that the result is >= 0. This is necessary because the result is
// passed to a LogSink and that API did not historically document whether
// levels could be negative or what that meant.
//
// Some example usage:
//
// logrV0 := getMyLogger()
// logrV2 := logrV0.V(2)
// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
// slogV2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
func (l fnlogger) levelFromSlog(level slog.Level) int {
result := -level
if result < 0 {
result = 0 // because LogSink doesn't expect negative V levels
}
return int(result)
}

View file

@ -207,10 +207,6 @@ limitations under the License.
// those.
package logr
import (
"context"
)
// New returns a new Logger instance. This is primarily used by libraries
// implementing LogSink, rather than end users. Passing a nil sink will create
// a Logger which discards all log lines.
@ -410,45 +406,6 @@ func (l Logger) IsZero() bool {
return l.sink == nil
}
// contextKey is how we find Loggers in a context.Context.
type contextKey struct{}
// FromContext returns a Logger from ctx or an error if no Logger is found.
func FromContext(ctx context.Context) (Logger, error) {
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
return v, nil
}
return Logger{}, notFoundError{}
}
// notFoundError exists to carry an IsNotFound method.
type notFoundError struct{}
func (notFoundError) Error() string {
return "no logr.Logger was present"
}
func (notFoundError) IsNotFound() bool {
return true
}
// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
// returns a Logger that discards all log messages.
func FromContextOrDiscard(ctx context.Context) Logger {
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
return v
}
return Discard()
}
// NewContext returns a new Context, derived from ctx, which carries the
// provided Logger.
func NewContext(ctx context.Context, logger Logger) context.Context {
return context.WithValue(ctx, contextKey{}, logger)
}
// RuntimeInfo holds information that the logr "core" library knows which
// LogSinks might want to know.
type RuntimeInfo struct {

192
vendor/github.com/go-logr/logr/sloghandler.go generated vendored Normal file
View file

@ -0,0 +1,192 @@
//go:build go1.21
// +build go1.21
/*
Copyright 2023 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
import (
"context"
"log/slog"
)
type slogHandler struct {
// May be nil, in which case all logs get discarded.
sink LogSink
// Non-nil if sink is non-nil and implements SlogSink.
slogSink SlogSink
// groupPrefix collects values from WithGroup calls. It gets added as
// prefix to value keys when handling a log record.
groupPrefix string
// levelBias can be set when constructing the handler to influence the
// slog.Level of log records. A positive levelBias reduces the
// slog.Level value. slog has no API to influence this value after the
// handler got created, so it can only be set indirectly through
// Logger.V.
levelBias slog.Level
}
var _ slog.Handler = &slogHandler{}
// groupSeparator is used to concatenate WithGroup names and attribute keys.
const groupSeparator = "."
// GetLevel is used for black box unit testing.
func (l *slogHandler) GetLevel() slog.Level {
return l.levelBias
}
func (l *slogHandler) Enabled(_ context.Context, level slog.Level) bool {
return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level)))
}
func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error {
if l.slogSink != nil {
// Only adjust verbosity level of log entries < slog.LevelError.
if record.Level < slog.LevelError {
record.Level -= l.levelBias
}
return l.slogSink.Handle(ctx, record)
}
// No need to check for nil sink here because Handle will only be called
// when Enabled returned true.
kvList := make([]any, 0, 2*record.NumAttrs())
record.Attrs(func(attr slog.Attr) bool {
kvList = attrToKVs(attr, l.groupPrefix, kvList)
return true
})
if record.Level >= slog.LevelError {
l.sinkWithCallDepth().Error(nil, record.Message, kvList...)
} else {
level := l.levelFromSlog(record.Level)
l.sinkWithCallDepth().Info(level, record.Message, kvList...)
}
return nil
}
// sinkWithCallDepth adjusts the stack unwinding so that when Error or Info
// are called by Handle, code in slog gets skipped.
//
// This offset currently (Go 1.21.0) works for calls through
// slog.New(ToSlogHandler(...)). There's no guarantee that the call
// chain won't change. Wrapping the handler will also break unwinding. It's
// still better than not adjusting at all....
//
// This cannot be done when constructing the handler because FromSlogHandler needs
// access to the original sink without this adjustment. A second copy would
// work, but then WithAttrs would have to be called for both of them.
func (l *slogHandler) sinkWithCallDepth() LogSink {
if sink, ok := l.sink.(CallDepthLogSink); ok {
return sink.WithCallDepth(2)
}
return l.sink
}
func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
if l.sink == nil || len(attrs) == 0 {
return l
}
clone := *l
if l.slogSink != nil {
clone.slogSink = l.slogSink.WithAttrs(attrs)
clone.sink = clone.slogSink
} else {
kvList := make([]any, 0, 2*len(attrs))
for _, attr := range attrs {
kvList = attrToKVs(attr, l.groupPrefix, kvList)
}
clone.sink = l.sink.WithValues(kvList...)
}
return &clone
}
func (l *slogHandler) WithGroup(name string) slog.Handler {
if l.sink == nil {
return l
}
if name == "" {
// slog says to inline empty groups
return l
}
clone := *l
if l.slogSink != nil {
clone.slogSink = l.slogSink.WithGroup(name)
clone.sink = clone.slogSink
} else {
clone.groupPrefix = addPrefix(clone.groupPrefix, name)
}
return &clone
}
// attrToKVs appends a slog.Attr to a logr-style kvList. It handles slog Groups
// and other details of slog.
func attrToKVs(attr slog.Attr, groupPrefix string, kvList []any) []any {
attrVal := attr.Value.Resolve()
if attrVal.Kind() == slog.KindGroup {
groupVal := attrVal.Group()
grpKVs := make([]any, 0, 2*len(groupVal))
prefix := groupPrefix
if attr.Key != "" {
prefix = addPrefix(groupPrefix, attr.Key)
}
for _, attr := range groupVal {
grpKVs = attrToKVs(attr, prefix, grpKVs)
}
kvList = append(kvList, grpKVs...)
} else if attr.Key != "" {
kvList = append(kvList, addPrefix(groupPrefix, attr.Key), attrVal.Any())
}
return kvList
}
func addPrefix(prefix, name string) string {
if prefix == "" {
return name
}
if name == "" {
return prefix
}
return prefix + groupSeparator + name
}
// levelFromSlog adjusts the level by the logger's verbosity and negates it.
// It ensures that the result is >= 0. This is necessary because the result is
// passed to a LogSink and that API did not historically document whether
// levels could be negative or what that meant.
//
// Some example usage:
//
// logrV0 := getMyLogger()
// logrV2 := logrV0.V(2)
// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
// slogV2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
func (l *slogHandler) levelFromSlog(level slog.Level) int {
result := -level
result += l.levelBias // in case the original Logger had a V level
if result < 0 {
result = 0 // because LogSink doesn't expect negative V levels
}
return int(result)
}
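A sketch of the fallback path above, using a deliberately minimal (hypothetical)
sink that does not implement `SlogSink`, so group names must be flattened into
dotted key prefixes:

```go
package main

import (
	"fmt"
	"log/slog"

	"github.com/go-logr/logr"
)

// printSink implements logr.LogSink but not logr.SlogSink.
type printSink struct{}

func (printSink) Init(logr.RuntimeInfo)                  {}
func (printSink) Enabled(int) bool                       { return true }
func (printSink) Info(level int, msg string, kv ...any)  { fmt.Println("INFO", level, msg, kv) }
func (printSink) Error(err error, msg string, kv ...any) { fmt.Println("ERROR", err, msg, kv) }
func (s printSink) WithValues(kv ...any) logr.LogSink    { return s } // values ignored in this sketch
func (s printSink) WithName(string) logr.LogSink         { return s }

func main() {
	log := slog.New(logr.ToSlogHandler(logr.New(printSink{})))
	// The sink receives the key as "http.method".
	log.WithGroup("http").Info("request", "method", "GET")
}
```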

100
vendor/github.com/go-logr/logr/slogr.go generated vendored Normal file
View file

@ -0,0 +1,100 @@
//go:build go1.21
// +build go1.21
/*
Copyright 2023 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
import (
"context"
"log/slog"
)
// FromSlogHandler returns a Logger which writes to the slog.Handler.
//
// The logr verbosity level is mapped to slog levels such that V(0) becomes
// slog.LevelInfo and V(4) becomes slog.LevelDebug.
func FromSlogHandler(handler slog.Handler) Logger {
if handler, ok := handler.(*slogHandler); ok {
if handler.sink == nil {
return Discard()
}
return New(handler.sink).V(int(handler.levelBias))
}
return New(&slogSink{handler: handler})
}
// ToSlogHandler returns a slog.Handler which writes to the same sink as the Logger.
//
// The returned logger writes all records with level >= slog.LevelError as
// error log entries with LogSink.Error, regardless of the verbosity level of
// the Logger:
//
// logger := <some Logger with 0 as verbosity level>
// slog.New(ToSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...)
//
// The level of all other records gets reduced by the verbosity
// level of the Logger and the result is negated. If it happens
// to be negative, then it gets replaced by zero because a LogSink
// is not expected to handle negative levels:
//
// slog.New(ToSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...)
// slog.New(ToSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...)
// slog.New(ToSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...)
// slog.New(ToSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...)
func ToSlogHandler(logger Logger) slog.Handler {
if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 {
return sink.handler
}
handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())}
if slogSink, ok := handler.sink.(SlogSink); ok {
handler.slogSink = slogSink
}
return handler
}
// SlogSink is an optional interface that a LogSink can implement to support
// logging through the slog.Logger or slog.Handler APIs better. It then should
// also support special slog values like slog.Group. When used as a
// slog.Handler, the advantages are:
//
// - stack unwinding gets avoided in favor of logging the pre-recorded PC,
// as intended by slog
// - proper grouping of key/value pairs via WithGroup
// - verbosity levels > slog.LevelInfo can be recorded
// - less overhead
//
// Both APIs (Logger and slog.Logger/Handler) then are supported equally
// well. Developers can pick whatever API suits them better and/or mix
// packages which use either API in the same binary with a common logging
// implementation.
//
// This interface is necessary because the type implementing the LogSink
// interface cannot also implement the slog.Handler interface due to the
// different prototype of the common Enabled method.
//
// An implementation could support both interfaces in two different types, but then
// additional interfaces would be needed to convert between those types in FromSlogHandler
// and ToSlogHandler.
type SlogSink interface {
LogSink
Handle(ctx context.Context, record slog.Record) error
WithAttrs(attrs []slog.Attr) SlogSink
WithGroup(name string) SlogSink
}
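A round-trip sketch of these two helpers (assuming Go 1.21+):

```go
package main

import (
	"log/slog"
	"os"

	"github.com/go-logr/logr"
)

func main() {
	base := logr.FromSlogHandler(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{
		Level: slog.LevelDebug, // let Debug records through so V(4) is visible
	}))

	base.Info("ordinary")       // V(0) -> slog.LevelInfo
	base.V(4).Info("debugging") // V(4) -> slog.LevelDebug

	// Converting a slog-backed Logger back returns the original handler.
	slog.New(logr.ToSlogHandler(base)).Warn("on the slog side again")
}
```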

120
vendor/github.com/go-logr/logr/slogsink.go generated vendored Normal file
View file

@ -0,0 +1,120 @@
//go:build go1.21
// +build go1.21
/*
Copyright 2023 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
import (
"context"
"log/slog"
"runtime"
"time"
)
var (
_ LogSink = &slogSink{}
_ CallDepthLogSink = &slogSink{}
_ Underlier = &slogSink{}
)
// Underlier is implemented by the LogSink returned by FromSlogHandler.
type Underlier interface {
// GetUnderlying returns the Handler used by the LogSink.
GetUnderlying() slog.Handler
}
const (
// nameKey is used to log the `WithName` values as an additional attribute.
nameKey = "logger"
// errKey is used to log the error parameter of Error as an additional attribute.
errKey = "err"
)
type slogSink struct {
callDepth int
name string
handler slog.Handler
}
func (l *slogSink) Init(info RuntimeInfo) {
l.callDepth = info.CallDepth
}
func (l *slogSink) GetUnderlying() slog.Handler {
return l.handler
}
func (l *slogSink) WithCallDepth(depth int) LogSink {
newLogger := *l
newLogger.callDepth += depth
return &newLogger
}
func (l *slogSink) Enabled(level int) bool {
return l.handler.Enabled(context.Background(), slog.Level(-level))
}
func (l *slogSink) Info(level int, msg string, kvList ...interface{}) {
l.log(nil, msg, slog.Level(-level), kvList...)
}
func (l *slogSink) Error(err error, msg string, kvList ...interface{}) {
l.log(err, msg, slog.LevelError, kvList...)
}
func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interface{}) {
var pcs [1]uintptr
// skip runtime.Callers, this function, Info/Error, and all helper functions above that.
runtime.Callers(3+l.callDepth, pcs[:])
record := slog.NewRecord(time.Now(), level, msg, pcs[0])
if l.name != "" {
record.AddAttrs(slog.String(nameKey, l.name))
}
if err != nil {
record.AddAttrs(slog.Any(errKey, err))
}
record.Add(kvList...)
_ = l.handler.Handle(context.Background(), record)
}
func (l slogSink) WithName(name string) LogSink {
if l.name != "" {
l.name += "/"
}
l.name += name
return &l
}
func (l slogSink) WithValues(kvList ...interface{}) LogSink {
l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...))
return &l
}
func kvListToAttrs(kvList ...interface{}) []slog.Attr {
// We don't need the record itself, only its Add method.
record := slog.NewRecord(time.Time{}, 0, "", 0)
record.Add(kvList...)
attrs := make([]slog.Attr, 0, record.NumAttrs())
record.Attrs(func(attr slog.Attr) bool {
attrs = append(attrs, attr)
return true
})
return attrs
}
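To see the `nameKey`/`errKey` attributes in action, a small sketch (the exact
output depends on the chosen slog handler):

```go
package main

import (
	"errors"
	"log/slog"
	"os"

	"github.com/go-logr/logr"
)

func main() {
	log := logr.FromSlogHandler(slog.NewTextHandler(os.Stdout, nil))

	// WithName segments are joined with "/" and emitted under "logger";
	// the error argument is emitted under "err".
	log.WithName("db").WithName("tx").Error(errors.New("boom"), "commit failed")
	// => ... level=ERROR msg="commit failed" logger=db/tx err=boom
}
```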

View file

@ -267,6 +267,12 @@ func nameInlinedSchemas(opts *FlattenOpts) error {
}
func removeUnused(opts *FlattenOpts) {
for removeUnusedSinglePass(opts) {
// continue until no unused definition remains
}
}
func removeUnusedSinglePass(opts *FlattenOpts) (hasRemoved bool) {
expected := make(map[string]struct{})
for k := range opts.Swagger().Definitions {
expected[path.Join(definitionsPath, jsonpointer.Escape(k))] = struct{}{}
@ -277,6 +283,7 @@ func removeUnused(opts *FlattenOpts) {
}
for k := range expected {
hasRemoved = true
debugLog("removing unused definition %s", path.Base(k))
if opts.Verbose {
log.Printf("info: removing unused definition: %s", path.Base(k))
@ -285,6 +292,8 @@ func removeUnused(opts *FlattenOpts) {
}
opts.Spec.reload() // re-analyze
return hasRemoved
}
func importKnownRef(entry sortref.RefRevIdx, refStr, newName string, opts *FlattenOpts) error {
@ -331,7 +340,7 @@ func importNewRef(entry sortref.RefRevIdx, refStr string, opts *FlattenOpts) err
}
// generate a unique name - isOAIGen means that a naming conflict was resolved by changing the name
newName, isOAIGen = uniqifyName(opts.Swagger().Definitions, nameFromRef(entry.Ref))
newName, isOAIGen = uniqifyName(opts.Swagger().Definitions, nameFromRef(entry.Ref, opts))
debugLog("new name for [%s]: %s - with name conflict:%t", strings.Join(entry.Keys, ", "), newName, isOAIGen)
opts.flattenContext.resolved[refStr] = newName
@ -649,6 +658,7 @@ func namePointers(opts *FlattenOpts) error {
refsToReplace := make(map[string]SchemaRef, len(opts.Spec.references.schemas))
for k, ref := range opts.Spec.references.allRefs {
debugLog("name pointers: %q => %#v", k, ref)
if path.Dir(ref.String()) == definitionsPath {
// this is a ref to a top-level definition: ok
continue
@ -766,6 +776,10 @@ func flattenAnonPointer(key string, v SchemaRef, refsToReplace map[string]Schema
// identifying edge case when the namer did nothing because we point to a non-schema object
// no definition is created and we expand the $ref for all callers
debugLog("decide what to do with the schema pointed to: asch.IsSimpleSchema=%t, len(callers)=%d, parts.IsSharedParam=%t, parts.IsSharedResponse=%t",
asch.IsSimpleSchema, len(callers), parts.IsSharedParam(), parts.IsSharedResponse(),
)
if (!asch.IsSimpleSchema || len(callers) > 1) && !parts.IsSharedParam() && !parts.IsSharedResponse() {
debugLog("replace JSON pointer at [%s] by definition: %s", key, v.Ref.String())
if err := namer.Name(v.Ref.String(), v.Schema, asch); err != nil {
@ -788,6 +802,7 @@ func flattenAnonPointer(key string, v SchemaRef, refsToReplace map[string]Schema
return nil
}
// everything that is a simple schema and not factorizable is expanded
debugLog("expand JSON pointer for key=%s", key)
if err := replace.UpdateRefWithSchema(opts.Swagger(), key, v.Schema); err != nil {

View file

@ -33,12 +33,14 @@ func (isn *InlineSchemaNamer) Name(key string, schema *spec.Schema, aschema *Ana
}
// create unique name
newName, isOAIGen := uniqifyName(isn.Spec.Definitions, swag.ToJSONName(name))
mangle := mangler(isn.opts)
newName, isOAIGen := uniqifyName(isn.Spec.Definitions, mangle(name))
// clone schema
sch := schutils.Clone(schema)
// replace values on schema
debugLog("rewriting schema to ref: key=%s with new name: %s", key, newName)
if err := replace.RewriteSchemaToRef(isn.Spec, key,
spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil {
return fmt.Errorf("error while creating definition %q from inline schema: %w", newName, err)
@ -149,13 +151,15 @@ func namesFromKey(parts sortref.SplitKey, aschema *AnalyzedSchema, operations ma
startIndex int
)
if parts.IsOperation() {
switch {
case parts.IsOperation():
baseNames, startIndex = namesForOperation(parts, operations)
}
// definitions
if parts.IsDefinition() {
case parts.IsDefinition():
baseNames, startIndex = namesForDefinition(parts)
default:
// this is a non-standard pointer: build a name by concatenating its parts
baseNames = [][]string{parts}
startIndex = len(baseNames) + 1
}
result := make([]string, 0, len(baseNames))
@ -169,6 +173,7 @@ func namesFromKey(parts sortref.SplitKey, aschema *AnalyzedSchema, operations ma
}
sort.Strings(result)
debugLog("names from parts: %v => %v", parts, result)
return result
}
@ -256,10 +261,20 @@ func partAdder(aschema *AnalyzedSchema) sortref.PartAdder {
}
}
func nameFromRef(ref spec.Ref) string {
func mangler(o *FlattenOpts) func(string) string {
if o.KeepNames {
return func(in string) string { return in }
}
return swag.ToJSONName
}
func nameFromRef(ref spec.Ref, o *FlattenOpts) string {
mangle := mangler(o)
u := ref.GetURL()
if u.Fragment != "" {
return swag.ToJSONName(path.Base(u.Fragment))
return mangle(path.Base(u.Fragment))
}
if u.Path != "" {
@ -267,14 +282,14 @@ func nameFromRef(ref spec.Ref) string {
if bn != "" && bn != "/" {
ext := path.Ext(bn)
if ext != "" {
return swag.ToJSONName(bn[:len(bn)-len(ext)])
return mangle(bn[:len(bn)-len(ext)])
}
return swag.ToJSONName(bn)
return mangle(bn)
}
}
return swag.ToJSONName(strings.ReplaceAll(u.Host, ".", " "))
return mangle(strings.ReplaceAll(u.Host, ".", " "))
}
// GenLocation indicates from which section of the specification (models or operations) a definition has been created.

View file

@ -26,6 +26,7 @@ type FlattenOpts struct {
Verbose bool // enable some reporting on possible name conflicts detected
RemoveUnused bool // When true, remove unused parameters, responses and definitions after expansion/flattening
ContinueOnError bool // Continue when spec expansion issues are found
KeepNames bool // Do not attempt to jsonify names from references when flattening
/* Extra keys */
_ struct{} // require keys
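A hypothetical sketch of the new flag in use (assuming the usual
go-openapi/loads entry point; the spec path is made up):

```go
package main

import (
	"log"

	"github.com/go-openapi/analysis"
	"github.com/go-openapi/loads"
)

func main() {
	doc, err := loads.Spec("swagger.json") // hypothetical spec file
	if err != nil {
		log.Fatal(err)
	}

	// KeepNames preserves definition names taken from $ref's verbatim
	// instead of passing them through swag.ToJSONName.
	err = analysis.Flatten(analysis.FlattenOpts{
		Spec:      doc.Analyzer,
		BasePath:  doc.SpecFilePath(),
		Minimal:   true,
		KeepNames: true,
	})
	if err != nil {
		log.Fatal(err)
	}
}
```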

View file

@ -29,7 +29,7 @@ var (
// GetLogger provides a prefix debug logger
func GetLogger(prefix string, debug bool) func(string, ...interface{}) {
if debug {
logger := log.New(output, fmt.Sprintf("%s:", prefix), log.LstdFlags)
logger := log.New(output, prefix+":", log.LstdFlags)
return func(msg string, args ...interface{}) {
_, file1, pos1, _ := runtime.Caller(1)
@ -37,5 +37,5 @@ func GetLogger(prefix string, debug bool) func(string, ...interface{}) {
}
}
return func(msg string, args ...interface{}) {}
return func(_ string, _ ...interface{}) {}
}

View file

@ -1,6 +1,7 @@
package replace
import (
"encoding/json"
"fmt"
"net/url"
"os"
@ -40,6 +41,8 @@ func RewriteSchemaToRef(sp *spec.Swagger, key string, ref spec.Ref) error {
if refable.Schema != nil {
refable.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
}
case map[string]interface{}: // this happens e.g. if a schema points to an extension unmarshaled as map[string]interface{}
return rewriteParentRef(sp, key, ref)
default:
return fmt.Errorf("no schema with ref found at %s for %T", key, value)
}
@ -120,6 +123,9 @@ func rewriteParentRef(sp *spec.Swagger, key string, ref spec.Ref) error {
case spec.SchemaProperties:
container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
case *interface{}:
*container = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}}
// NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema
default:
@ -385,8 +391,9 @@ DOWNREF:
err := asSchema.UnmarshalJSON(asJSON)
if err != nil {
return nil,
fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema a, got: %T",
currentRef.String(), value)
fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema a, got: %T (%v)",
currentRef.String(), value, err,
)
}
warnings = append(warnings, fmt.Sprintf("found $ref %q (response) interpreted as schema", currentRef.String()))
@ -402,8 +409,9 @@ DOWNREF:
var asSchema spec.Schema
if err := asSchema.UnmarshalJSON(asJSON); err != nil {
return nil,
fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema a, got: %T",
currentRef.String(), value)
fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema a, got: %T (%v)",
currentRef.String(), value, err,
)
}
warnings = append(warnings, fmt.Sprintf("found $ref %q (parameter) interpreted as schema", currentRef.String()))
@ -414,9 +422,25 @@ DOWNREF:
currentRef = asSchema.Ref
default:
// fallback: attempts to resolve the pointer as a schema
if refable == nil {
break DOWNREF
}
asJSON, _ := json.Marshal(refable)
var asSchema spec.Schema
if err := asSchema.UnmarshalJSON(asJSON); err != nil {
return nil,
fmt.Errorf("unhandled type to resolve JSON pointer %s. Expected a Schema, got: %T",
currentRef.String(), value)
fmt.Errorf("unhandled type to resolve JSON pointer %s. Expected a Schema, got: %T (%v)",
currentRef.String(), value, err,
)
}
warnings = append(warnings, fmt.Sprintf("found $ref %q (%T) interpreted as schema", currentRef.String(), refable))
if asSchema.Ref.String() == "" {
break DOWNREF
}
currentRef = asSchema.Ref
}
}

View file

@ -69,7 +69,7 @@ func KeyParts(key string) SplitKey {
return res
}
// SplitKey holds of the parts of a /-separated key, soi that their location may be determined.
// SplitKey holds of the parts of a /-separated key, so that their location may be determined.
type SplitKey []string
// IsDefinition is true when the split key is in the #/definitions section of a spec

View file

@ -53,7 +53,7 @@ import (
// collisions.
func Mixin(primary *spec.Swagger, mixins ...*spec.Swagger) []string {
skipped := make([]string, 0, len(mixins))
opIds := getOpIds(primary)
opIDs := getOpIDs(primary)
initPrimary(primary)
for i, m := range mixins {
@ -74,7 +74,7 @@ func Mixin(primary *spec.Swagger, mixins ...*spec.Swagger) []string {
skipped = append(skipped, mergeDefinitions(primary, m)...)
// merging paths requires a map of operationIDs to work with
skipped = append(skipped, mergePaths(primary, m, opIds, i)...)
skipped = append(skipped, mergePaths(primary, m, opIDs, i)...)
skipped = append(skipped, mergeParameters(primary, m)...)
@ -84,9 +84,9 @@ func Mixin(primary *spec.Swagger, mixins ...*spec.Swagger) []string {
return skipped
}
// getOpIds extracts all the paths.<path>.operationIds from the given
// getOpIDs extracts all the paths.<path>.operationIds from the given
// spec and returns them as the keys in a map with 'true' values.
func getOpIds(s *spec.Swagger) map[string]bool {
func getOpIDs(s *spec.Swagger) map[string]bool {
rv := make(map[string]bool)
if s.Paths == nil {
return rv
@ -179,7 +179,7 @@ func mergeDefinitions(primary *spec.Swagger, m *spec.Swagger) (skipped []string)
return
}
func mergePaths(primary *spec.Swagger, m *spec.Swagger, opIds map[string]bool, mixIndex int) (skipped []string) {
func mergePaths(primary *spec.Swagger, m *spec.Swagger, opIDs map[string]bool, mixIndex int) (skipped []string) {
if m.Paths != nil {
for k, v := range m.Paths.Paths {
if _, exists := primary.Paths.Paths[k]; exists {
@ -198,10 +198,10 @@ func mergePaths(primary *spec.Swagger, m *spec.Swagger, opIds map[string]bool, m
// all the provided specs are already unique.
piops := pathItemOps(v)
for _, piop := range piops {
if opIds[piop.ID] {
if opIDs[piop.ID] {
piop.ID = fmt.Sprintf("%v%v%v", piop.ID, "Mixin", mixIndex)
}
opIds[piop.ID] = true
opIDs[piop.ID] = true
}
primary.Paths.Paths[k] = v
}

View file

@ -1,7 +1,7 @@
package analysis
import (
"fmt"
"errors"
"github.com/go-openapi/spec"
"github.com/go-openapi/strfmt"
@ -19,7 +19,7 @@ type SchemaOpts struct {
// patterns.
func Schema(opts SchemaOpts) (*AnalyzedSchema, error) {
if opts.Schema == nil {
return nil, fmt.Errorf("no schema to analyze")
return nil, errors.New("no schema to analyze")
}
a := &AnalyzedSchema{

View file

@ -110,16 +110,36 @@ func SetForToken(document any, decodedToken string, value any) (any, error) {
return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider)
}
func isNil(input any) bool {
if input == nil {
return true
}
kind := reflect.TypeOf(input).Kind()
switch kind { //nolint:exhaustive
case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan:
return reflect.ValueOf(input).IsNil()
default:
return false
}
}
func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvider) (any, reflect.Kind, error) {
rValue := reflect.Indirect(reflect.ValueOf(node))
kind := rValue.Kind()
if isNil(node) {
return nil, kind, fmt.Errorf("nil value has no field %q", decodedToken)
}
if rValue.Type().Implements(jsonPointableType) {
r, err := node.(JSONPointable).JSONLookup(decodedToken)
switch typed := node.(type) {
case JSONPointable:
r, err := typed.JSONLookup(decodedToken)
if err != nil {
return nil, kind, err
}
return r, kind, nil
case *any: // case of a pointer to interface, that is not resolved by reflect.Indirect
return getSingleImpl(*typed, decodedToken, nameProvider)
}
switch kind { //nolint:exhaustive
@ -244,7 +264,7 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error {
knd := reflect.ValueOf(node).Kind()
if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array {
return fmt.Errorf("only structs, pointers, maps and slices are supported for setting values")
return errors.New("only structs, pointers, maps and slices are supported for setting values")
}
if nameProvider == nil {
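A short usage sketch of the resolution path patched above (the document literal
is made up):

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-openapi/jsonpointer"
)

func main() {
	doc := map[string]any{
		"paths": map[string]any{
			"/pets": map[string]any{"get": map[string]any{"operationId": "listPets"}},
		},
	}

	p, err := jsonpointer.New("/paths/~1pets/get/operationId") // "~1" escapes "/"
	if err != nil {
		log.Fatal(err)
	}

	v, kind, err := p.Get(doc)
	if err != nil {
		log.Fatal(err) // a nil node along the path now fails cleanly
	}
	fmt.Println(v, kind) // listPets string
}
```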

View file

@ -1,4 +1,4 @@
# Loads OAI specs [![Build Status](https://github.com/go-openapi/loads/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/loads/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/loads/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/lods)
# Loads OAI specs [![Build Status](https://github.com/go-openapi/loads/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/loads/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/loads/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/loads)
[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/loads/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/loads?status.svg)](http://godoc.org/github.com/go-openapi/loads)
[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/loads)](https://goreportcard.com/report/github.com/go-openapi/loads)

View file

@ -1,3 +0,0 @@
[x] why not filepath in JSONDoc()
[] integration tests package
[] relint

View file

@ -21,7 +21,7 @@ var (
func init() {
jsonLoader := &loader{
DocLoaderWithMatch: DocLoaderWithMatch{
Match: func(pth string) bool {
Match: func(_ string) bool {
return true
},
Fn: JSONDoc,

View file

@ -248,7 +248,8 @@ func (d *Document) ResetDefinitions() *Document {
// Pristine creates a new pristine document instance based on the input data
func (d *Document) Pristine() *Document {
dd, _ := Analyzed(d.Raw(), d.Version())
raw, _ := json.Marshal(d.Spec())
dd, _ := Analyzed(raw, d.Version())
dd.pathLoader = d.pathLoader
dd.specFilePath = d.specFilePath

View file

@ -38,9 +38,16 @@ type byteStreamOpts struct {
Close bool
}
// ByteStreamConsumer creates a consumer for byte streams,
// takes a Writer/BinaryUnmarshaler interface or binary slice by reference,
// and reads from the provided reader
// ByteStreamConsumer creates a consumer for byte streams.
//
// The consumer consumes from a provided reader into the data passed by reference.
//
// Supported output underlying types and interfaces, prioritized in this order:
// - io.ReaderFrom (for maximum control)
// - io.Writer (performs io.Copy)
// - encoding.BinaryUnmarshaler
// - *string
// - *[]byte
func ByteStreamConsumer(opts ...byteStreamOpt) Consumer {
var vals byteStreamOpts
for _, opt := range opts {
@ -51,10 +58,13 @@ func ByteStreamConsumer(opts ...byteStreamOpt) Consumer {
if reader == nil {
return errors.New("ByteStreamConsumer requires a reader") // early exit
}
if data == nil {
return errors.New("nil destination for ByteStreamConsumer")
}
closer := defaultCloser
if vals.Close {
if cl, ok := reader.(io.Closer); ok {
if cl, isReaderCloser := reader.(io.Closer); isReaderCloser {
closer = cl.Close
}
}
@ -62,34 +72,56 @@ func ByteStreamConsumer(opts ...byteStreamOpt) Consumer {
_ = closer()
}()
if wrtr, ok := data.(io.Writer); ok {
_, err := io.Copy(wrtr, reader)
if readerFrom, isReaderFrom := data.(io.ReaderFrom); isReaderFrom {
_, err := readerFrom.ReadFrom(reader)
return err
}
buf := new(bytes.Buffer)
if writer, isDataWriter := data.(io.Writer); isDataWriter {
_, err := io.Copy(writer, reader)
return err
}
// buffers input before writing to data
var buf bytes.Buffer
_, err := buf.ReadFrom(reader)
if err != nil {
return err
}
b := buf.Bytes()
if bu, ok := data.(encoding.BinaryUnmarshaler); ok {
return bu.UnmarshalBinary(b)
}
switch destinationPointer := data.(type) {
case encoding.BinaryUnmarshaler:
return destinationPointer.UnmarshalBinary(b)
case *any:
switch (*destinationPointer).(type) {
case string:
*destinationPointer = string(b)
return nil
case []byte:
*destinationPointer = b
if data != nil {
if str, ok := data.(*string); ok {
*str = string(b)
return nil
}
default:
// check for the underlying type to be pointer to []byte or string,
if ptr := reflect.TypeOf(data); ptr.Kind() != reflect.Ptr {
return errors.New("destination must be a pointer")
}
if t := reflect.TypeOf(data); data != nil && t.Kind() == reflect.Ptr {
v := reflect.Indirect(reflect.ValueOf(data))
if t = v.Type(); t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 {
t := v.Type()
switch {
case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8:
v.SetBytes(b)
return nil
case t.Kind() == reflect.String:
v.SetString(string(b))
return nil
}
}
@ -98,21 +130,35 @@ func ByteStreamConsumer(opts ...byteStreamOpt) Consumer {
})
}
// ByteStreamProducer creates a producer for byte streams,
// takes a Reader/BinaryMarshaler interface or binary slice,
// and writes to a writer (essentially a pipe)
// ByteStreamProducer creates a producer for byte streams.
//
// The producer takes input data then writes to an output writer (essentially as a pipe).
//
// Supported input underlying types and interfaces, prioritized in this order:
// - io.WriterTo (for maximum control)
// - io.Reader (performs io.Copy). A ReadCloser is closed before exiting.
// - encoding.BinaryMarshaler
// - error (writes as a string)
// - []byte
// - string
// - struct, other slices: writes as JSON
func ByteStreamProducer(opts ...byteStreamOpt) Producer {
var vals byteStreamOpts
for _, opt := range opts {
opt(&vals)
}
return ProducerFunc(func(writer io.Writer, data interface{}) error {
if writer == nil {
return errors.New("ByteStreamProducer requires a writer") // early exit
}
if data == nil {
return errors.New("nil data for ByteStreamProducer")
}
closer := defaultCloser
if vals.Close {
if cl, ok := writer.(io.Closer); ok {
if cl, isWriterCloser := writer.(io.Closer); isWriterCloser {
closer = cl.Close
}
}
@ -120,46 +166,51 @@ func ByteStreamProducer(opts ...byteStreamOpt) Producer {
_ = closer()
}()
if rc, ok := data.(io.ReadCloser); ok {
if rc, isDataCloser := data.(io.ReadCloser); isDataCloser {
defer rc.Close()
}
if rdr, ok := data.(io.Reader); ok {
_, err := io.Copy(writer, rdr)
switch origin := data.(type) {
case io.WriterTo:
_, err := origin.WriteTo(writer)
return err
}
if bm, ok := data.(encoding.BinaryMarshaler); ok {
bytes, err := bm.MarshalBinary()
case io.Reader:
_, err := io.Copy(writer, origin)
return err
case encoding.BinaryMarshaler:
bytes, err := origin.MarshalBinary()
if err != nil {
return err
}
_, err = writer.Write(bytes)
return err
}
if data != nil {
if str, ok := data.(string); ok {
_, err := writer.Write([]byte(str))
case error:
_, err := writer.Write([]byte(origin.Error()))
return err
}
if e, ok := data.(error); ok {
_, err := writer.Write([]byte(e.Error()))
return err
}
default:
v := reflect.Indirect(reflect.ValueOf(data))
if t := v.Type(); t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 {
t := v.Type()
switch {
case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8:
_, err := writer.Write(v.Bytes())
return err
}
if t := v.Type(); t.Kind() == reflect.Struct || t.Kind() == reflect.Slice {
case t.Kind() == reflect.String:
_, err := writer.Write([]byte(v.String()))
return err
case t.Kind() == reflect.Struct || t.Kind() == reflect.Slice:
b, err := swag.WriteJSON(data)
if err != nil {
return err
}
_, err = writer.Write(b)
return err
}
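A sketch exercising the documented priorities (hypothetical data):

```go
package main

import (
	"bytes"
	"fmt"
	"log"
	"strings"

	"github.com/go-openapi/runtime"
)

func main() {
	// Consume: *bytes.Buffer satisfies io.ReaderFrom, the highest priority.
	var in bytes.Buffer
	if err := runtime.ByteStreamConsumer().Consume(strings.NewReader("payload"), &in); err != nil {
		log.Fatal(err)
	}

	// Produce: plain strings are now written out as raw bytes.
	var out bytes.Buffer
	if err := runtime.ByteStreamProducer().Produce(&out, "payload"); err != nil {
		log.Fatal(err)
	}
	fmt.Println(in.String() == out.String()) // true
}
```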

View file

@ -36,7 +36,7 @@ import (
)
// NewRequest creates a new swagger http client request
func newRequest(method, pathPattern string, writer runtime.ClientRequestWriter) (*request, error) {
func newRequest(method, pathPattern string, writer runtime.ClientRequestWriter) *request {
return &request{
pathPattern: pathPattern,
method: method,
@ -45,7 +45,7 @@ func newRequest(method, pathPattern string, writer runtime.ClientRequestWriter)
query: make(url.Values),
timeout: DefaultTimeout,
getBody: getRequestBuffer,
}, nil
}
}
// Request represents a swagger client request.

View file

@ -22,6 +22,7 @@ import (
"crypto/tls"
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"mime"
"net/http"
@ -31,12 +32,13 @@ import (
"sync"
"time"
"github.com/go-openapi/strfmt"
"github.com/opentracing/opentracing-go"
"github.com/go-openapi/runtime"
"github.com/go-openapi/runtime/logger"
"github.com/go-openapi/runtime/middleware"
"github.com/go-openapi/runtime/yamlpc"
"github.com/go-openapi/strfmt"
"github.com/opentracing/opentracing-go"
)
const (
@ -142,7 +144,7 @@ func TLSClientAuth(opts TLSClientOptions) (*tls.Config, error) {
return nil, fmt.Errorf("tls client priv key: %v", err)
}
default:
return nil, fmt.Errorf("tls client priv key: unsupported key type")
return nil, errors.New("tls client priv key: unsupported key type")
}
block = pem.Block{Type: "PRIVATE KEY", Bytes: keyBytes}
@ -378,14 +380,11 @@ func (r *Runtime) EnableConnectionReuse() {
func (r *Runtime) createHttpRequest(operation *runtime.ClientOperation) (*request, *http.Request, error) { //nolint:revive,stylecheck
params, _, auth := operation.Params, operation.Reader, operation.AuthInfo
request, err := newRequest(operation.Method, operation.PathPattern, params)
if err != nil {
return nil, nil, err
}
request := newRequest(operation.Method, operation.PathPattern, params)
var accept []string
accept = append(accept, operation.ProducesMediaTypes...)
if err = request.SetHeaderParam(runtime.HeaderAccept, accept...); err != nil {
if err := request.SetHeaderParam(runtime.HeaderAccept, accept...); err != nil {
return nil, nil, err
}
@ -457,27 +456,36 @@ func (r *Runtime) Submit(operation *runtime.ClientOperation) (interface{}, error
r.logger.Debugf("%s\n", string(b))
}
var hasTimeout bool
pctx := operation.Context
if pctx == nil {
pctx = r.Context
} else {
hasTimeout = true
var parentCtx context.Context
switch {
case operation.Context != nil:
parentCtx = operation.Context
case r.Context != nil:
parentCtx = r.Context
default:
parentCtx = context.Background()
}
if pctx == nil {
pctx = context.Background()
}
var ctx context.Context
var cancel context.CancelFunc
if hasTimeout {
ctx, cancel = context.WithCancel(pctx)
var (
ctx context.Context
cancel context.CancelFunc
)
if request.timeout == 0 {
// There may be a deadline in the context passed to the operation.
// Otherwise, there is no timeout set.
ctx, cancel = context.WithCancel(parentCtx)
} else {
ctx, cancel = context.WithTimeout(pctx, request.timeout)
// Sets the timeout passed from request params (by default runtime.DefaultTimeout).
// If there is already a deadline in the parent context, the shortest will
// apply.
ctx, cancel = context.WithTimeout(parentCtx, request.timeout)
}
defer cancel()
client := operation.Client
if client == nil {
var client *http.Client
if operation.Client != nil {
client = operation.Client
} else {
client = r.client
}
req = req.WithContext(ctx)
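The "shortest deadline wins" behavior relied on above is standard context
semantics; a stdlib-only sketch:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// Parent carries a 50ms deadline, the request timeout asks for 1s:
	// context.WithTimeout keeps whichever deadline comes first.
	parent, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	ctx, cancel2 := context.WithTimeout(parent, time.Second)
	defer cancel2()

	deadline, _ := ctx.Deadline()
	fmt.Println(time.Until(deadline) <= 50*time.Millisecond) // true: the parent wins
}
```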

View file

@ -16,62 +16,335 @@ package runtime
import (
"bytes"
"context"
"encoding"
"encoding/csv"
"errors"
"fmt"
"io"
"reflect"
"golang.org/x/sync/errgroup"
)
// CSVConsumer creates a new CSV consumer
func CSVConsumer() Consumer {
// CSVConsumer creates a new CSV consumer.
//
// The consumer consumes CSV records from a provided reader into the data passed by reference.
//
// CSVOpts options may be specified to alter the default CSV behavior on the reader and the writer side (e.g. separator, skip header, ...).
// The defaults are those of the standard library's csv.Reader and csv.Writer.
//
// Supported output underlying types and interfaces, prioritized in this order:
// - *csv.Writer
// - CSVWriter (writer options are ignored)
// - io.Writer (as raw bytes)
// - io.ReaderFrom (as raw bytes)
// - encoding.BinaryUnmarshaler (as raw bytes)
// - *[][]string (as a collection of records)
// - *[]byte (as raw bytes)
// - *string (as raw bytes)
//
// The consumer prioritizes situations where buffering the input is not required.
func CSVConsumer(opts ...CSVOpt) Consumer {
o := csvOptsWithDefaults(opts)
return ConsumerFunc(func(reader io.Reader, data interface{}) error {
if reader == nil {
return errors.New("CSVConsumer requires a reader")
}
if data == nil {
return errors.New("nil destination for CSVConsumer")
}
csvReader := csv.NewReader(reader)
writer, ok := data.(io.Writer)
if !ok {
return errors.New("data type must be io.Writer")
o.applyToReader(csvReader)
closer := defaultCloser
if o.closeStream {
if cl, isReaderCloser := reader.(io.Closer); isReaderCloser {
closer = cl.Close
}
csvWriter := csv.NewWriter(writer)
records, err := csvReader.ReadAll()
if err != nil {
}
defer func() {
_ = closer()
}()
switch destination := data.(type) {
case *csv.Writer:
csvWriter := destination
o.applyToWriter(csvWriter)
return pipeCSV(csvWriter, csvReader, o)
case CSVWriter:
csvWriter := destination
// no writer options available
return pipeCSV(csvWriter, csvReader, o)
case io.Writer:
csvWriter := csv.NewWriter(destination)
o.applyToWriter(csvWriter)
return pipeCSV(csvWriter, csvReader, o)
case io.ReaderFrom:
var buf bytes.Buffer
csvWriter := csv.NewWriter(&buf)
o.applyToWriter(csvWriter)
if err := bufferedCSV(csvWriter, csvReader, o); err != nil {
return err
}
for _, r := range records {
if err := csvWriter.Write(r); err != nil {
_, err := destination.ReadFrom(&buf)
return err
case encoding.BinaryUnmarshaler:
var buf bytes.Buffer
csvWriter := csv.NewWriter(&buf)
o.applyToWriter(csvWriter)
if err := bufferedCSV(csvWriter, csvReader, o); err != nil {
return err
}
return destination.UnmarshalBinary(buf.Bytes())
default:
// support *[][]string, *[]byte, *string
if ptr := reflect.TypeOf(data); ptr.Kind() != reflect.Ptr {
return errors.New("destination must be a pointer")
}
csvWriter.Flush()
v := reflect.Indirect(reflect.ValueOf(data))
t := v.Type()
switch {
case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Slice && t.Elem().Elem().Kind() == reflect.String:
csvWriter := &csvRecordsWriter{}
// writer options are ignored
if err := pipeCSV(csvWriter, csvReader, o); err != nil {
return err
}
v.Grow(len(csvWriter.records))
v.SetCap(len(csvWriter.records)) // in case Grow was unnecessary, trim down the capacity
v.SetLen(len(csvWriter.records))
reflect.Copy(v, reflect.ValueOf(csvWriter.records))
return nil
case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8:
var buf bytes.Buffer
csvWriter := csv.NewWriter(&buf)
o.applyToWriter(csvWriter)
if err := bufferedCSV(csvWriter, csvReader, o); err != nil {
return err
}
v.SetBytes(buf.Bytes())
return nil
case t.Kind() == reflect.String:
var buf bytes.Buffer
csvWriter := csv.NewWriter(&buf)
o.applyToWriter(csvWriter)
if err := bufferedCSV(csvWriter, csvReader, o); err != nil {
return err
}
v.SetString(buf.String())
return nil
default:
return fmt.Errorf("%v (%T) is not supported by the CSVConsumer, %s",
data, data, "can be resolved by supporting CSVWriter/Writer/BinaryUnmarshaler interface",
)
}
}
})
}
// CSVProducer creates a new CSV producer
func CSVProducer() Producer {
// CSVProducer creates a new CSV producer.
//
// The producer takes input data then writes as CSV to an output writer (essentially as a pipe).
//
// Supported input underlying types and interfaces, prioritized in this order:
// - *csv.Reader
// - CSVReader (reader options are ignored)
// - io.Reader
// - io.WriterTo
// - encoding.BinaryMarshaler
// - [][]string
// - []byte
// - string
//
// The producer prioritizes situations where buffering the input is not required.
func CSVProducer(opts ...CSVOpt) Producer {
o := csvOptsWithDefaults(opts)
return ProducerFunc(func(writer io.Writer, data interface{}) error {
if writer == nil {
return errors.New("CSVProducer requires a writer")
}
dataBytes, ok := data.([]byte)
if !ok {
return errors.New("data type must be byte array")
if data == nil {
return errors.New("nil data for CSVProducer")
}
csvWriter := csv.NewWriter(writer)
o.applyToWriter(csvWriter)
closer := defaultCloser
if o.closeStream {
if cl, isWriterCloser := writer.(io.Closer); isWriterCloser {
closer = cl.Close
}
}
defer func() {
_ = closer()
}()
if rc, isDataCloser := data.(io.ReadCloser); isDataCloser {
defer rc.Close()
}
switch origin := data.(type) {
case *csv.Reader:
csvReader := origin
o.applyToReader(csvReader)
return pipeCSV(csvWriter, csvReader, o)
case CSVReader:
csvReader := origin
// no reader options available
return pipeCSV(csvWriter, csvReader, o)
case io.Reader:
csvReader := csv.NewReader(origin)
o.applyToReader(csvReader)
return pipeCSV(csvWriter, csvReader, o)
case io.WriterTo:
// async piping of the writes performed by WriteTo
r, w := io.Pipe()
csvReader := csv.NewReader(r)
o.applyToReader(csvReader)
pipe, _ := errgroup.WithContext(context.Background())
pipe.Go(func() error {
_, err := origin.WriteTo(w)
_ = w.Close()
return err
})
pipe.Go(func() error {
defer func() {
_ = r.Close()
}()
return pipeCSV(csvWriter, csvReader, o)
})
return pipe.Wait()
case encoding.BinaryMarshaler:
buf, err := origin.MarshalBinary()
if err != nil {
return err
}
rdr := bytes.NewBuffer(buf)
csvReader := csv.NewReader(rdr)
return bufferedCSV(csvWriter, csvReader, o)
default:
// support [][]string, []byte, string (or pointers to those)
v := reflect.Indirect(reflect.ValueOf(data))
t := v.Type()
switch {
case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Slice && t.Elem().Elem().Kind() == reflect.String:
csvReader := &csvRecordsWriter{
records: make([][]string, v.Len()),
}
reflect.Copy(reflect.ValueOf(csvReader.records), v)
return pipeCSV(csvWriter, csvReader, o)
case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8:
buf := bytes.NewBuffer(v.Bytes())
csvReader := csv.NewReader(buf)
o.applyToReader(csvReader)
return bufferedCSV(csvWriter, csvReader, o)
case t.Kind() == reflect.String:
buf := bytes.NewBufferString(v.String())
csvReader := csv.NewReader(buf)
o.applyToReader(csvReader)
return bufferedCSV(csvWriter, csvReader, o)
default:
return fmt.Errorf("%v (%T) is not supported by the CSVProducer, %s",
data, data, "can be resolved by supporting CSVReader/Reader/BinaryMarshaler interface",
)
}
}
})
}
// pipeCSV copies CSV records from a CSV reader to a CSV writer
func pipeCSV(csvWriter CSVWriter, csvReader CSVReader, opts csvOpts) error {
for ; opts.skippedLines > 0; opts.skippedLines-- {
_, err := csvReader.Read()
if err != nil {
if errors.Is(err, io.EOF) {
return nil
}
return err
}
}
for {
record, err := csvReader.Read()
if err != nil {
if errors.Is(err, io.EOF) {
break
}
return err
}
if err := csvWriter.Write(record); err != nil {
return err
}
}
csvWriter.Flush()
return csvWriter.Error()
}
// bufferedCSV copies CSV records from a CSV reader to a CSV writer,
// by first reading all records then writing them at once.
func bufferedCSV(csvWriter *csv.Writer, csvReader *csv.Reader, opts csvOpts) error {
for ; opts.skippedLines > 0; opts.skippedLines-- {
_, err := csvReader.Read()
if err != nil {
if errors.Is(err, io.EOF) {
return nil
}
return err
}
}
csvReader := csv.NewReader(bytes.NewBuffer(dataBytes))
records, err := csvReader.ReadAll()
if err != nil {
return err
}
csvWriter := csv.NewWriter(writer)
for _, r := range records {
if err := csvWriter.Write(r); err != nil {
return err
}
}
csvWriter.Flush()
return nil
})
return csvWriter.WriteAll(records)
}

vendor/github.com/go-openapi/runtime/csv_options.go generated vendored Normal file
View file

@@ -0,0 +1,121 @@
package runtime
import (
"encoding/csv"
"io"
)
// CSVOpt alters the behavior of the CSV consumer or producer.
type CSVOpt func(*csvOpts)
type csvOpts struct {
csvReader csv.Reader
csvWriter csv.Writer
skippedLines int
closeStream bool
}
// WithCSVReaderOpts specifies the options to csv.Reader
// when reading CSV.
func WithCSVReaderOpts(reader csv.Reader) CSVOpt {
return func(o *csvOpts) {
o.csvReader = reader
}
}
// WithCSVWriterOpts specifies the options to csv.Writer
// when writing CSV.
func WithCSVWriterOpts(writer csv.Writer) CSVOpt {
return func(o *csvOpts) {
o.csvWriter = writer
}
}
// WithCSVSkipLines skips the first n lines (e.g. header rows) when processing CSV records.
func WithCSVSkipLines(skipped int) CSVOpt {
return func(o *csvOpts) {
o.skippedLines = skipped
}
}
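// WithCSVClosesStream closes the underlying stream (when it implements io.Closer)
// once the consumer or producer is done processing.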
func WithCSVClosesStream() CSVOpt {
return func(o *csvOpts) {
o.closeStream = true
}
}
func (o csvOpts) applyToReader(in *csv.Reader) {
if o.csvReader.Comma != 0 {
in.Comma = o.csvReader.Comma
}
if o.csvReader.Comment != 0 {
in.Comment = o.csvReader.Comment
}
if o.csvReader.FieldsPerRecord != 0 {
in.FieldsPerRecord = o.csvReader.FieldsPerRecord
}
in.LazyQuotes = o.csvReader.LazyQuotes
in.TrimLeadingSpace = o.csvReader.TrimLeadingSpace
in.ReuseRecord = o.csvReader.ReuseRecord
}
func (o csvOpts) applyToWriter(in *csv.Writer) {
if o.csvWriter.Comma != 0 {
in.Comma = o.csvWriter.Comma
}
in.UseCRLF = o.csvWriter.UseCRLF
}
func csvOptsWithDefaults(opts []CSVOpt) csvOpts {
var o csvOpts
for _, apply := range opts {
apply(&o)
}
return o
}
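// CSVWriter is the interface implemented by writers of CSV records, such as *csv.Writer.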
type CSVWriter interface {
Write([]string) error
Flush()
Error() error
}
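// CSVReader is the interface implemented by readers of CSV records, such as *csv.Reader.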
type CSVReader interface {
Read() ([]string, error)
}
var (
_ CSVWriter = &csvRecordsWriter{}
_ CSVReader = &csvRecordsWriter{}
)
// csvRecordsWriter is an internal container to move CSV records back and forth
type csvRecordsWriter struct {
i int
records [][]string
}
func (w *csvRecordsWriter) Write(record []string) error {
w.records = append(w.records, record)
return nil
}
func (w *csvRecordsWriter) Read() ([]string, error) {
if w.i >= len(w.records) {
return nil, io.EOF
}
defer func() {
w.i++
}()
return w.records[w.i], nil
}
func (w *csvRecordsWriter) Flush() {}
func (w *csvRecordsWriter) Error() error {
return nil
}
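To make the new option plumbing concrete, here is a minimal, hypothetical round-trip (not part of this change) through CSVProducer and CSVConsumer; the records and the ';' separator are made-up values:

```go
package main

import (
	"bytes"
	"encoding/csv"
	"fmt"

	"github.com/go-openapi/runtime"
)

func main() {
	// Produce CSV from [][]string records, using ';' as the field separator.
	producer := runtime.CSVProducer(runtime.WithCSVWriterOpts(csv.Writer{Comma: ';'}))
	var out bytes.Buffer
	records := [][]string{{"id", "name"}, {"1", "alpha"}}
	if err := producer.Produce(&out, records); err != nil {
		panic(err)
	}
	fmt.Print(out.String()) // id;name\n1;alpha\n

	// Consume it back into a [][]string, skipping the header line.
	consumer := runtime.CSVConsumer(
		runtime.WithCSVReaderOpts(csv.Reader{Comma: ';'}),
		runtime.WithCSVSkipLines(1),
	)
	var parsed [][]string
	if err := consumer.Consume(&out, &parsed); err != nil {
		panic(err)
	}
	fmt.Println(parsed) // [[1 alpha]]
}
```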

View file

@@ -5,6 +5,8 @@ import (
"os"
)
var _ Logger = StandardLogger{}
type StandardLogger struct{}
func (StandardLogger) Printf(format string, args ...interface{}) {

View file

@@ -18,6 +18,8 @@ import (
stdContext "context"
"fmt"
"net/http"
"net/url"
"path"
"strings"
"sync"
@@ -35,12 +37,21 @@ import (
// Debug when true turns on verbose logging
var Debug = logger.DebugEnabled()
// Logger is the standard library logger used for printing debug messages
var Logger logger.Logger = logger.StandardLogger{}
func debugLog(format string, args ...interface{}) { //nolint:goprintffuncname
if Debug {
Logger.Printf(format, args...)
func debugLogfFunc(lg logger.Logger) func(string, ...any) {
if logger.DebugEnabled() {
if lg == nil {
return Logger.Debugf
}
return lg.Debugf
}
// muted logger
return func(_ string, _ ...any) {}
}
// A Builder can create middlewares
@@ -77,6 +88,7 @@ type Context struct {
analyzer *analysis.Spec
api RoutableAPI
router Router
debugLogf func(string, ...any) // a logging function to debug context and all components using it
}
type routableUntypedAPI struct {
@@ -189,7 +201,9 @@ func (r *routableUntypedAPI) DefaultConsumes() string {
return r.defaultConsumes
}
// NewRoutableContext creates a new context for a routable API
// NewRoutableContext creates a new context for a routable API.
//
// If a nil Router is provided, the DefaultRouter (denco-based) will be used.
func NewRoutableContext(spec *loads.Document, routableAPI RoutableAPI, routes Router) *Context {
var an *analysis.Spec
if spec != nil {
@@ -199,26 +213,40 @@ func NewRoutableContext(spec *loads.Document, routableAPI RoutableAPI, routes Ro
return NewRoutableContextWithAnalyzedSpec(spec, an, routableAPI, routes)
}
// NewRoutableContextWithAnalyzedSpec is like NewRoutableContext but takes in input the analysed spec too
// NewRoutableContextWithAnalyzedSpec is like NewRoutableContext but takes as input an already analysed spec.
//
// If a nil Router is provided, the DefaultRouter (denco-based) will be used.
func NewRoutableContextWithAnalyzedSpec(spec *loads.Document, an *analysis.Spec, routableAPI RoutableAPI, routes Router) *Context {
// Either both the spec doc and its analysis are provided, or neither of them.
if !((spec == nil && an == nil) || (spec != nil && an != nil)) {
panic(errors.New(http.StatusInternalServerError, "routable context requires either both spec doc and analysis, or none of them"))
}
ctx := &Context{spec: spec, api: routableAPI, analyzer: an, router: routes}
return ctx
return &Context{
spec: spec,
api: routableAPI,
analyzer: an,
router: routes,
debugLogf: debugLogfFunc(nil),
}
}
// NewContext creates a new context wrapper
// NewContext creates a new context wrapper.
//
// If a nil Router is provided, the DefaultRouter (denco-based) will be used.
func NewContext(spec *loads.Document, api *untyped.API, routes Router) *Context {
var an *analysis.Spec
if spec != nil {
an = analysis.New(spec.Spec())
}
ctx := &Context{spec: spec, analyzer: an}
ctx := &Context{
spec: spec,
analyzer: an,
router: routes,
debugLogf: debugLogfFunc(nil),
}
ctx.api = newRoutableUntypedAPI(spec, api, ctx)
ctx.router = routes
return ctx
}
@@ -282,6 +310,13 @@ func (c *Context) BasePath() string {
return c.spec.BasePath()
}
// SetLogger allows for injecting a logger to catch debug entries.
//
// The logger is enabled in DEBUG mode only.
func (c *Context) SetLogger(lg logger.Logger) {
c.debugLogf = debugLogfFunc(lg)
}
// RequiredProduces returns the accepted content types for responses
func (c *Context) RequiredProduces() []string {
return c.analyzer.RequiredProduces()
@@ -299,6 +334,7 @@ func (c *Context) BindValidRequest(request *http.Request, route *MatchedRoute, b
if err != nil {
res = append(res, err)
} else {
c.debugLogf("validating content type for %q against [%s]", ct, strings.Join(route.Consumes, ", "))
if err := validateContentType(route.Consumes, ct); err != nil {
res = append(res, err)
}
@@ -397,16 +433,16 @@ func (c *Context) ResponseFormat(r *http.Request, offers []string) (string, *htt
var rCtx = r.Context()
if v, ok := rCtx.Value(ctxResponseFormat).(string); ok {
debugLog("[%s %s] found response format %q in context", r.Method, r.URL.Path, v)
c.debugLogf("[%s %s] found response format %q in context", r.Method, r.URL.Path, v)
return v, r
}
format := NegotiateContentType(r, offers, "")
if format != "" {
debugLog("[%s %s] set response format %q in context", r.Method, r.URL.Path, format)
c.debugLogf("[%s %s] set response format %q in context", r.Method, r.URL.Path, format)
r = r.WithContext(stdContext.WithValue(rCtx, ctxResponseFormat, format))
}
debugLog("[%s %s] negotiated response format %q", r.Method, r.URL.Path, format)
c.debugLogf("[%s %s] negotiated response format %q", r.Method, r.URL.Path, format)
return format, r
}
@@ -469,7 +505,7 @@ func (c *Context) BindAndValidate(request *http.Request, matched *MatchedRoute)
var rCtx = request.Context()
if v, ok := rCtx.Value(ctxBoundParams).(*validation); ok {
debugLog("got cached validation (valid: %t)", len(v.result) == 0)
c.debugLogf("got cached validation (valid: %t)", len(v.result) == 0)
if len(v.result) > 0 {
return v.bound, request, errors.CompositeValidationError(v.result...)
}
@@ -481,7 +517,7 @@ func (c *Context) BindAndValidate(request *http.Request, matched *MatchedRoute)
if len(result.result) > 0 {
return result.bound, request, errors.CompositeValidationError(result.result...)
}
debugLog("no validation errors found")
c.debugLogf("no validation errors found")
return result.bound, request, nil
}
@@ -492,7 +528,7 @@ func (c *Context) NotFound(rw http.ResponseWriter, r *http.Request) {
// Respond renders the response after doing some content negotiation
func (c *Context) Respond(rw http.ResponseWriter, r *http.Request, produces []string, route *MatchedRoute, data interface{}) {
debugLog("responding to %s %s with produces: %v", r.Method, r.URL.Path, produces)
c.debugLogf("responding to %s %s with produces: %v", r.Method, r.URL.Path, produces)
offers := []string{}
for _, mt := range produces {
if mt != c.api.DefaultProduces() {
@@ -501,7 +537,7 @@ func (c *Context) Respond(rw http.ResponseWriter, r *http.Request, produces []st
}
// the default producer is last so more specific producers take precedence
offers = append(offers, c.api.DefaultProduces())
debugLog("offers: %v", offers)
c.debugLogf("offers: %v", offers)
var format string
format, r = c.ResponseFormat(r, offers)
@@ -584,45 +620,92 @@ func (c *Context) Respond(rw http.ResponseWriter, r *http.Request, produces []st
c.api.ServeErrorFor(route.Operation.ID)(rw, r, errors.New(http.StatusInternalServerError, "can't produce response"))
}
func (c *Context) APIHandlerSwaggerUI(builder Builder) http.Handler {
// APIHandlerSwaggerUI returns a handler to serve the API.
//
// This handler includes a swagger spec, router and the contract defined in the swagger spec.
//
// A spec UI (SwaggerUI) is served at {API base path}/docs and the spec document at /swagger.json
// (these can be modified with uiOptions).
func (c *Context) APIHandlerSwaggerUI(builder Builder, opts ...UIOption) http.Handler {
b := builder
if b == nil {
b = PassthroughBuilder
}
specPath, uiOpts, specOpts := c.uiOptionsForHandler(opts)
var swaggerUIOpts SwaggerUIOpts
fromCommonToAnyOptions(uiOpts, &swaggerUIOpts)
return Spec(specPath, c.spec.Raw(), SwaggerUI(swaggerUIOpts, c.RoutesHandler(b)), specOpts...)
}
// APIHandlerRapiDoc returns a handler to serve the API.
//
// This handler includes a swagger spec, router and the contract defined in the swagger spec.
//
// A spec UI (RapiDoc) is served at {API base path}/docs and the spec document at /swagger.json
// (these can be modified with uiOptions).
func (c *Context) APIHandlerRapiDoc(builder Builder, opts ...UIOption) http.Handler {
b := builder
if b == nil {
b = PassthroughBuilder
}
specPath, uiOpts, specOpts := c.uiOptionsForHandler(opts)
var rapidocUIOpts RapiDocOpts
fromCommonToAnyOptions(uiOpts, &rapidocUIOpts)
return Spec(specPath, c.spec.Raw(), RapiDoc(rapidocUIOpts, c.RoutesHandler(b)), specOpts...)
}
// APIHandler returns a handler to serve the API.
//
// This handler includes a swagger spec, router and the contract defined in the swagger spec.
//
// A spec UI (Redoc) is served at {API base path}/docs and the spec document at /swagger.json
// (these can be modified with uiOptions).
func (c *Context) APIHandler(builder Builder, opts ...UIOption) http.Handler {
b := builder
if b == nil {
b = PassthroughBuilder
}
specPath, uiOpts, specOpts := c.uiOptionsForHandler(opts)
var redocOpts RedocOpts
fromCommonToAnyOptions(uiOpts, &redocOpts)
return Spec(specPath, c.spec.Raw(), Redoc(redocOpts, c.RoutesHandler(b)), specOpts...)
}
func (c Context) uiOptionsForHandler(opts []UIOption) (string, uiOptions, []SpecOption) {
var title string
sp := c.spec.Spec()
if sp != nil && sp.Info != nil && sp.Info.Title != "" {
title = sp.Info.Title
}
swaggerUIOpts := SwaggerUIOpts{
BasePath: c.BasePath(),
Title: title,
// default options (may be overridden)
optsForContext := []UIOption{
WithUIBasePath(c.BasePath()),
WithUITitle(title),
}
optsForContext = append(optsForContext, opts...)
uiOpts := uiOptionsWithDefaults(optsForContext)
// If a spec URL is provided, there is a non-default path to serve the spec.
// This makes sure that the UI middleware is aligned with the Spec middleware.
u, _ := url.Parse(uiOpts.SpecURL)
var specPath string
if u != nil {
specPath = u.Path
}
return Spec("", c.spec.Raw(), SwaggerUI(swaggerUIOpts, c.RoutesHandler(b)))
pth, doc := path.Split(specPath)
if pth == "." {
pth = ""
}
// APIHandler returns a handler to serve the API, this includes a swagger spec, router and the contract defined in the swagger spec
func (c *Context) APIHandler(builder Builder) http.Handler {
b := builder
if b == nil {
b = PassthroughBuilder
}
var title string
sp := c.spec.Spec()
if sp != nil && sp.Info != nil && sp.Info.Title != "" {
title = sp.Info.Title
}
redocOpts := RedocOpts{
BasePath: c.BasePath(),
Title: title,
}
return Spec("", c.spec.Raw(), Redoc(redocOpts, c.RoutesHandler(b)))
return pth, uiOpts, []SpecOption{WithSpecDocument(doc)}
}
// RoutesHandler returns a handler to serve the API, just the routes and the contract defined in the swagger spec
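As a sketch of the logger injection added above (the stderrLogger type is hypothetical), a custom logger.Logger can be attached to a Context; entries are only emitted when the runtime's debug mode is enabled:

```go
package apiutil

import (
	"log"
	"os"

	"github.com/go-openapi/runtime/middleware"
)

// stderrLogger is a hypothetical logger.Logger implementation.
type stderrLogger struct{ l *log.Logger }

func (s stderrLogger) Printf(format string, args ...interface{}) { s.l.Printf(format, args...) }
func (s stderrLogger) Debugf(format string, args ...interface{}) { s.l.Printf("DEBUG "+format, args...) }

// withDebugLogging attaches the logger to an existing context; output
// appears only when debug mode is on.
func withDebugLogging(ctx *middleware.Context) {
	ctx.SetLogger(stderrLogger{l: log.New(os.Stderr, "[api] ", log.LstdFlags)})
}
```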

View file

@@ -2,6 +2,7 @@
package denco
import (
"errors"
"fmt"
"sort"
"strings"
@@ -29,13 +30,13 @@ const (
// Router represents a URL router.
type Router struct {
param *doubleArray
// SizeHint expects the maximum number of path parameters in records to Build.
// SizeHint will be used to determine the capacity of the memory to allocate.
// By default, SizeHint will be determined from given records to Build.
SizeHint int
static map[string]interface{}
param *doubleArray
}
// New returns a new Router.
@@ -71,7 +72,7 @@ func (rt *Router) Lookup(path string) (data interface{}, params Params, found bo
func (rt *Router) Build(records []Record) error {
statics, params := makeRecords(records)
if len(params) > MaxSize {
return fmt.Errorf("denco: too many records")
return errors.New("denco: too many records")
}
if rt.SizeHint < 0 {
rt.SizeHint = 0
@@ -197,24 +198,29 @@ func (da *doubleArray) lookup(path string, params []Param, idx int) (*node, []Pa
if next := nextIndex(da.bc[idx].Base(), TerminationCharacter); next < len(da.bc) && da.bc[next].Check() == TerminationCharacter {
return da.node[da.bc[next].Base()], params, true
}
BACKTRACKING:
for j := len(indices) - 1; j >= 0; j-- {
i, idx := int(indices[j]>>32), int(indices[j]&0xffffffff)
if da.bc[idx].IsSingleParam() {
idx := nextIndex(da.bc[idx].Base(), ParamCharacter) //nolint:govet
if idx >= len(da.bc) {
nextIdx := nextIndex(da.bc[idx].Base(), ParamCharacter)
if nextIdx >= len(da.bc) {
break
}
next := NextSeparator(path, i)
params := append(params, Param{Value: path[i:next]}) //nolint:govet
if nd, params, found := da.lookup(path[next:], params, idx); found { //nolint:govet
return nd, params, true
nextParams := params
nextParams = append(nextParams, Param{Value: path[i:next]})
if nd, nextNextParams, found := da.lookup(path[next:], nextParams, nextIdx); found {
return nd, nextNextParams, true
}
}
if da.bc[idx].IsWildcardParam() {
idx := nextIndex(da.bc[idx].Base(), WildcardCharacter) //nolint:govet
params := append(params, Param{Value: path[i:]}) //nolint:govet
return da.node[da.bc[idx].Base()], params, true
nextIdx := nextIndex(da.bc[idx].Base(), WildcardCharacter)
nextParams := params
nextParams = append(nextParams, Param{Value: path[i:]})
return da.node[da.bc[nextIdx].Base()], nextParams, true
}
}
return nil, nil, false
@@ -326,7 +332,7 @@ func (da *doubleArray) arrange(records []*record, idx, depth int, usedBase map[i
}
base = da.findBase(siblings, idx, usedBase)
if base > MaxSize {
return -1, nil, nil, fmt.Errorf("denco: too many elements of internal slice")
return -1, nil, nil, errors.New("denco: too many elements of internal slice")
}
da.setBase(idx, base)
return base, siblings, leaf, err
@@ -387,7 +393,7 @@ func makeSiblings(records []*record, depth int) (sib []sibling, leaf *record, er
case pc == c:
continue
default:
return nil, nil, fmt.Errorf("denco: BUG: routing table hasn't been sorted")
return nil, nil, errors.New("denco: BUG: routing table hasn't been sorted")
}
if n > 0 {
sib[n-1].end = i
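A small usage sketch of the vendored denco router, useful when reviewing the backtracking changes above (routes and values are made up):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/runtime/middleware/denco"
)

func main() {
	router := denco.New()
	records := []denco.Record{
		{Key: "/pools", Value: "pool-list"},
		{Key: "/pools/:id", Value: "pool-detail"},
	}
	if err := router.Build(records); err != nil {
		panic(err)
	}

	data, params, found := router.Lookup("/pools/42")
	fmt.Println(data, found) // pool-detail true
	for _, p := range params {
		fmt.Printf("%s=%s\n", p.Name, p.Value) // id=42
	}
}
```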

View file

@@ -1,10 +0,0 @@
//go:build go1.8
// +build go1.8
package middleware
import "net/url"
func pathUnescape(path string) (string, error) {
return url.PathUnescape(path)
}

View file

@@ -1,9 +0,0 @@
// +build !go1.8
package middleware
import "net/url"
func pathUnescape(path string) (string, error) {
return url.QueryUnescape(path)
}

View file

@@ -1,4 +1,3 @@
//nolint:dupl
package middleware
import (
@@ -11,66 +10,57 @@ import (
// RapiDocOpts configures the RapiDoc middlewares
type RapiDocOpts struct {
// BasePath for the UI path, defaults to: /
// BasePath for the UI, defaults to: /
BasePath string
// Path combines with BasePath for the full UI path, defaults to: docs
// Path combines with BasePath to construct the path to the UI, defaults to: "docs".
Path string
// SpecURL the url to find the spec for
// SpecURL is the URL of the spec document.
//
// Defaults to: /swagger.json
SpecURL string
// RapiDocURL for the js that generates the rapidoc site, defaults to: https://cdn.jsdelivr.net/npm/rapidoc/bundles/rapidoc.standalone.js
RapiDocURL string
// Title for the documentation site, defaults to: API documentation
Title string
// Template specifies a custom template to serve the UI
Template string
// RapiDocURL points to the js asset that generates the rapidoc site.
//
// Defaults to https://unpkg.com/rapidoc/dist/rapidoc-min.js
RapiDocURL string
}
// EnsureDefaults in case some options are missing
func (r *RapiDocOpts) EnsureDefaults() {
if r.BasePath == "" {
r.BasePath = "/"
}
if r.Path == "" {
r.Path = defaultDocsPath
}
if r.SpecURL == "" {
r.SpecURL = defaultDocsURL
}
common := toCommonUIOptions(r)
common.EnsureDefaults()
fromCommonToAnyOptions(common, r)
// rapidoc-specifics
if r.RapiDocURL == "" {
r.RapiDocURL = rapidocLatest
}
if r.Title == "" {
r.Title = defaultDocsTitle
if r.Template == "" {
r.Template = rapidocTemplate
}
}
// RapiDoc creates a middleware to serve a documentation site for a swagger spec.
//
// This allows for altering the spec before starting the http listener.
func RapiDoc(opts RapiDocOpts, next http.Handler) http.Handler {
opts.EnsureDefaults()
pth := path.Join(opts.BasePath, opts.Path)
tmpl := template.Must(template.New("rapidoc").Parse(rapidocTemplate))
buf := bytes.NewBuffer(nil)
_ = tmpl.Execute(buf, opts)
b := buf.Bytes()
return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
if r.URL.Path == pth {
rw.Header().Set("Content-Type", "text/html; charset=utf-8")
rw.WriteHeader(http.StatusOK)
_, _ = rw.Write(b)
return
tmpl := template.Must(template.New("rapidoc").Parse(opts.Template))
assets := bytes.NewBuffer(nil)
if err := tmpl.Execute(assets, opts); err != nil {
panic(fmt.Errorf("cannot execute template: %w", err))
}
if next == nil {
rw.Header().Set("Content-Type", "text/plain")
rw.WriteHeader(http.StatusNotFound)
_, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth)))
return
}
next.ServeHTTP(rw, r)
})
return serveUI(pth, assets.Bytes(), next)
}
const (

View file

@@ -1,4 +1,3 @@
//nolint:dupl
package middleware
import (
@@ -11,66 +10,58 @@ import (
// RedocOpts configures the Redoc middlewares
type RedocOpts struct {
// BasePath for the UI path, defaults to: /
// BasePath for the UI, defaults to: /
BasePath string
// Path combines with BasePath for the full UI path, defaults to: docs
// Path combines with BasePath to construct the path to the UI, defaults to: "docs".
Path string
// SpecURL the url to find the spec for
// SpecURL is the URL of the spec document.
//
// Defaults to: /swagger.json
SpecURL string
// RedocURL for the js that generates the redoc site, defaults to: https://cdn.jsdelivr.net/npm/redoc/bundles/redoc.standalone.js
RedocURL string
// Title for the documentation site, defaults to: API documentation
Title string
// Template specifies a custom template to serve the UI
Template string
// RedocURL points to the js that generates the redoc site.
//
// Defaults to: https://cdn.jsdelivr.net/npm/redoc/bundles/redoc.standalone.js
RedocURL string
}
// EnsureDefaults in case some options are missing
func (r *RedocOpts) EnsureDefaults() {
if r.BasePath == "" {
r.BasePath = "/"
}
if r.Path == "" {
r.Path = defaultDocsPath
}
if r.SpecURL == "" {
r.SpecURL = defaultDocsURL
}
common := toCommonUIOptions(r)
common.EnsureDefaults()
fromCommonToAnyOptions(common, r)
// redoc-specifics
if r.RedocURL == "" {
r.RedocURL = redocLatest
}
if r.Title == "" {
r.Title = defaultDocsTitle
if r.Template == "" {
r.Template = redocTemplate
}
}
// Redoc creates a middleware to serve a documentation site for a swagger spec.
//
// This allows for altering the spec before starting the http listener.
func Redoc(opts RedocOpts, next http.Handler) http.Handler {
opts.EnsureDefaults()
pth := path.Join(opts.BasePath, opts.Path)
tmpl := template.Must(template.New("redoc").Parse(redocTemplate))
buf := bytes.NewBuffer(nil)
_ = tmpl.Execute(buf, opts)
b := buf.Bytes()
return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
if r.URL.Path == pth {
rw.Header().Set("Content-Type", "text/html; charset=utf-8")
rw.WriteHeader(http.StatusOK)
_, _ = rw.Write(b)
return
tmpl := template.Must(template.New("redoc").Parse(opts.Template))
assets := bytes.NewBuffer(nil)
if err := tmpl.Execute(assets, opts); err != nil {
panic(fmt.Errorf("cannot execute template: %w", err))
}
if next == nil {
rw.Header().Set("Content-Type", "text/plain")
rw.WriteHeader(http.StatusNotFound)
_, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth)))
return
}
next.ServeHTTP(rw, r)
})
return serveUI(pth, assets.Bytes(), next)
}
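A minimal sketch of mounting the reworked Redoc middleware (the title and spec location are placeholders; the spec document itself is expected to be served separately, e.g. by the Spec middleware):

```go
package apiutil

import (
	"net/http"

	"github.com/go-openapi/runtime/middleware"
)

// withDocs serves a Redoc page at /docs, falling back to the API handler.
func withDocs(api http.Handler) http.Handler {
	return middleware.Redoc(middleware.RedocOpts{
		SpecURL: "/swagger.json", // assumed spec location
		Title:   "Example API",
	}, api)
}
```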
const (

View file

@@ -19,10 +19,10 @@ import (
"reflect"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
"github.com/go-openapi/runtime/logger"
"github.com/go-openapi/spec"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/runtime"
)
// UntypedRequestBinder binds and validates the data from a http request
@@ -31,6 +31,7 @@ type UntypedRequestBinder struct {
Parameters map[string]spec.Parameter
Formats strfmt.Registry
paramBinders map[string]*untypedParamBinder
debugLogf func(string, ...any) // a logging function to debug context and all components using it
}
// NewUntypedRequestBinder creates a new binder for reading a request.
@@ -44,6 +45,7 @@ func NewUntypedRequestBinder(parameters map[string]spec.Parameter, spec *spec.Sw
paramBinders: binders,
Spec: spec,
Formats: formats,
debugLogf: debugLogfFunc(nil),
}
}
@@ -52,10 +54,10 @@ func (o *UntypedRequestBinder) Bind(request *http.Request, routeParams RoutePara
val := reflect.Indirect(reflect.ValueOf(data))
isMap := val.Kind() == reflect.Map
var result []error
debugLog("binding %d parameters for %s %s", len(o.Parameters), request.Method, request.URL.EscapedPath())
o.debugLogf("binding %d parameters for %s %s", len(o.Parameters), request.Method, request.URL.EscapedPath())
for fieldName, param := range o.Parameters {
binder := o.paramBinders[fieldName]
debugLog("binding parameter %s for %s %s", fieldName, request.Method, request.URL.EscapedPath())
o.debugLogf("binding parameter %s for %s %s", fieldName, request.Method, request.URL.EscapedPath())
var target reflect.Value
if !isMap {
binder.Name = fieldName
@@ -102,3 +104,14 @@ func (o *UntypedRequestBinder) Bind(request *http.Request, routeParams RoutePara
return nil
}
// SetLogger allows for injecting a logger to catch debug entries.
//
// The logger is enabled in DEBUG mode only.
func (o *UntypedRequestBinder) SetLogger(lg logger.Logger) {
o.debugLogf = debugLogfFunc(lg)
}
func (o *UntypedRequestBinder) setDebugLogf(fn func(string, ...any)) {
o.debugLogf = fn
}

View file

@@ -17,10 +17,12 @@ package middleware
import (
"fmt"
"net/http"
"net/url"
fpath "path"
"regexp"
"strings"
"github.com/go-openapi/runtime/logger"
"github.com/go-openapi/runtime/security"
"github.com/go-openapi/swag"
@@ -67,10 +69,10 @@ func (r RouteParams) GetOK(name string) ([]string, bool, bool) {
return nil, false, false
}
// NewRouter creates a new context aware router middleware
// NewRouter creates a new context-aware router middleware
func NewRouter(ctx *Context, next http.Handler) http.Handler {
if ctx.router == nil {
ctx.router = DefaultRouter(ctx.spec, ctx.api)
ctx.router = DefaultRouter(ctx.spec, ctx.api, WithDefaultRouterLoggerFunc(ctx.debugLogf))
}
return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
@@ -103,7 +105,7 @@ type RoutableAPI interface {
DefaultConsumes() string
}
// Router represents a swagger aware router
// Router represents a swagger-aware router
type Router interface {
Lookup(method, path string) (*MatchedRoute, bool)
OtherMethods(method, path string) []string
@@ -114,30 +116,64 @@ type defaultRouteBuilder struct {
analyzer *analysis.Spec
api RoutableAPI
records map[string][]denco.Record
debugLogf func(string, ...any) // a logging function to debug context and all components using it
}
type defaultRouter struct {
spec *loads.Document
routers map[string]*denco.Router
debugLogf func(string, ...any) // a logging function to debug context and all components using it
}
func newDefaultRouteBuilder(spec *loads.Document, api RoutableAPI, opts ...DefaultRouterOpt) *defaultRouteBuilder {
var o defaultRouterOpts
for _, apply := range opts {
apply(&o)
}
if o.debugLogf == nil {
o.debugLogf = debugLogfFunc(nil) // defaults to standard logger
}
func newDefaultRouteBuilder(spec *loads.Document, api RoutableAPI) *defaultRouteBuilder {
return &defaultRouteBuilder{
spec: spec,
analyzer: analysis.New(spec.Spec()),
api: api,
records: make(map[string][]denco.Record),
debugLogf: o.debugLogf,
}
}
// DefaultRouter creates a default implemenation of the router
func DefaultRouter(spec *loads.Document, api RoutableAPI) Router {
builder := newDefaultRouteBuilder(spec, api)
// DefaultRouterOpt allows to inject optional behavior to the default router.
type DefaultRouterOpt func(*defaultRouterOpts)
type defaultRouterOpts struct {
debugLogf func(string, ...any)
}
// WithDefaultRouterLogger sets the debug logger for the default router.
//
// This is enabled only in DEBUG mode.
func WithDefaultRouterLogger(lg logger.Logger) DefaultRouterOpt {
return func(o *defaultRouterOpts) {
o.debugLogf = debugLogfFunc(lg)
}
}
// WithDefaultRouterLoggerFunc sets a logging debug method for the default router.
func WithDefaultRouterLoggerFunc(fn func(string, ...any)) DefaultRouterOpt {
return func(o *defaultRouterOpts) {
o.debugLogf = fn
}
}
// DefaultRouter creates a default implementation of the router
func DefaultRouter(spec *loads.Document, api RoutableAPI, opts ...DefaultRouterOpt) Router {
builder := newDefaultRouteBuilder(spec, api, opts...)
if spec != nil {
for method, paths := range builder.analyzer.Operations() {
for path, operation := range paths {
fp := fpath.Join(spec.BasePath(), path)
debugLog("adding route %s %s %q", method, fp, operation.ID)
builder.debugLogf("adding route %s %s %q", method, fp, operation.ID)
builder.AddRoute(method, fp, operation)
}
}
@@ -319,24 +355,24 @@ func (m *MatchedRoute) NeedsAuth() bool {
func (d *defaultRouter) Lookup(method, path string) (*MatchedRoute, bool) {
mth := strings.ToUpper(method)
debugLog("looking up route for %s %s", method, path)
d.debugLogf("looking up route for %s %s", method, path)
if Debug {
if len(d.routers) == 0 {
debugLog("there are no known routers")
d.debugLogf("there are no known routers")
}
for meth := range d.routers {
debugLog("got a router for %s", meth)
d.debugLogf("got a router for %s", meth)
}
}
if router, ok := d.routers[mth]; ok {
if m, rp, ok := router.Lookup(fpath.Clean(path)); ok && m != nil {
if entry, ok := m.(*routeEntry); ok {
debugLog("found a route for %s %s with %d parameters", method, path, len(entry.Parameters))
d.debugLogf("found a route for %s %s with %d parameters", method, path, len(entry.Parameters))
var params RouteParams
for _, p := range rp {
v, err := pathUnescape(p.Value)
v, err := url.PathUnescape(p.Value)
if err != nil {
debugLog("failed to escape %q: %v", p.Value, err)
d.debugLogf("failed to escape %q: %v", p.Value, err)
v = p.Value
}
// a workaround to handle fragment/composing parameters until they are supported in denco router
@@ -356,10 +392,10 @@ func (d *defaultRouter) Lookup(method, path string) (*MatchedRoute, bool) {
return &MatchedRoute{routeEntry: *entry, Params: params}, true
}
} else {
debugLog("couldn't find a route by path for %s %s", method, path)
d.debugLogf("couldn't find a route by path for %s %s", method, path)
}
} else {
debugLog("couldn't find a route by method for %s %s", method, path)
d.debugLogf("couldn't find a route by method for %s %s", method, path)
}
return nil, false
}
@@ -378,6 +414,10 @@ func (d *defaultRouter) OtherMethods(method, path string) []string {
return methods
}
func (d *defaultRouter) SetLogger(lg logger.Logger) {
d.debugLogf = debugLogfFunc(lg)
}
// convert swagger parameters per path segment into a denco parameter, as multiple parameters per segment are not supported in denco
var pathConverter = regexp.MustCompile(`{(.+?)}([^/]*)`)
@@ -413,7 +453,7 @@ func (d *defaultRouteBuilder) AddRoute(method, path string, operation *spec.Oper
bp = bp[:len(bp)-1]
}
debugLog("operation: %#v", *operation)
d.debugLogf("operation: %#v", *operation)
if handler, ok := d.api.HandlerFor(method, strings.TrimPrefix(path, bp)); ok {
consumes := d.analyzer.ConsumesFor(operation)
produces := d.analyzer.ProducesFor(operation)
@@ -428,6 +468,8 @@ func (d *defaultRouteBuilder) AddRoute(method, path string, operation *spec.Oper
produces = append(produces, defProduces)
}
requestBinder := NewUntypedRequestBinder(parameters, d.spec.Spec(), d.api.Formats())
requestBinder.setDebugLogf(d.debugLogf)
record := denco.NewRecord(pathConverter.ReplaceAllString(path, ":$1"), &routeEntry{
BasePath: bp,
PathPattern: path,
@@ -439,7 +481,7 @@ func (d *defaultRouteBuilder) AddRoute(method, path string, operation *spec.Oper
Producers: d.api.ProducersFor(normalizeOffers(produces)),
Parameters: parameters,
Formats: d.api.Formats(),
Binder: NewUntypedRequestBinder(parameters, d.spec.Spec(), d.api.Formats()),
Binder: requestBinder,
Authenticators: d.buildAuthenticators(operation),
Authorizer: d.api.Authorizer(),
})
@@ -484,5 +526,6 @@ func (d *defaultRouteBuilder) Build() *defaultRouter {
return &defaultRouter{
spec: d.spec,
routers: routers,
debugLogf: d.debugLogf,
}
}
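To illustrate the new DefaultRouterOpt plumbing, a sketch (specDoc and api are assumed to be provided by the caller):

```go
package apiutil

import (
	"log"

	"github.com/go-openapi/loads"
	"github.com/go-openapi/runtime/middleware"
)

// buildRouter wires the default (denco-based) router with a custom
// debug-log function.
func buildRouter(specDoc *loads.Document, api middleware.RoutableAPI) middleware.Router {
	return middleware.DefaultRouter(specDoc, api,
		middleware.WithDefaultRouterLoggerFunc(func(format string, args ...any) {
			log.Printf(format, args...)
		}),
	)
}
```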

View file

@@ -19,29 +19,84 @@ import (
"path"
)
// Spec creates a middleware to serve a swagger spec.
const (
contentTypeHeader = "Content-Type"
applicationJSON = "application/json"
)
// SpecOption can be applied to the Spec serving middleware
type SpecOption func(*specOptions)
var defaultSpecOptions = specOptions{
Path: "",
Document: "swagger.json",
}
type specOptions struct {
Path string
Document string
}
func specOptionsWithDefaults(opts []SpecOption) specOptions {
o := defaultSpecOptions
for _, apply := range opts {
apply(&o)
}
return o
}
// Spec creates a middleware to serve a swagger spec as a JSON document.
//
// This allows for altering the spec before starting the http listener.
// This can be useful if you want to serve the swagger spec from another path than /swagger.json
func Spec(basePath string, b []byte, next http.Handler) http.Handler {
//
// The basePath argument indicates the path of the spec document (defaults to "/").
// Additional SpecOption can be used to change the name of the document (defaults to "swagger.json").
func Spec(basePath string, b []byte, next http.Handler, opts ...SpecOption) http.Handler {
if basePath == "" {
basePath = "/"
}
pth := path.Join(basePath, "swagger.json")
o := specOptionsWithDefaults(opts)
pth := path.Join(basePath, o.Path, o.Document)
return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
if r.URL.Path == pth {
rw.Header().Set("Content-Type", "application/json")
if path.Clean(r.URL.Path) == pth {
rw.Header().Set(contentTypeHeader, applicationJSON)
rw.WriteHeader(http.StatusOK)
//#nosec
_, _ = rw.Write(b)
return
}
if next == nil {
rw.Header().Set("Content-Type", "application/json")
rw.WriteHeader(http.StatusNotFound)
if next != nil {
next.ServeHTTP(rw, r)
return
}
next.ServeHTTP(rw, r)
rw.Header().Set(contentTypeHeader, applicationJSON)
rw.WriteHeader(http.StatusNotFound)
})
}
// WithSpecPath sets the path to be joined to the base path of the Spec middleware.
//
// This is empty by default.
func WithSpecPath(pth string) SpecOption {
return func(o *specOptions) {
o.Path = pth
}
}
// WithSpecDocument sets the name of the JSON document served as a spec.
//
// By default, this is "swagger.json"
func WithSpecDocument(doc string) SpecOption {
return func(o *specOptions) {
if doc == "" {
return
}
o.Document = doc
}
}
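A sketch of the new SpecOption knobs, serving the document from a non-default location (all names are made up; rawSpec would typically come from loads.Document.Raw()):

```go
package apiutil

import (
	"net/http"

	"github.com/go-openapi/runtime/middleware"
)

// specHandler serves the raw spec at /api/spec/openapi.json instead of
// the default /swagger.json.
func specHandler(rawSpec []byte, next http.Handler) http.Handler {
	return middleware.Spec("/api", rawSpec, next,
		middleware.WithSpecPath("spec"),
		middleware.WithSpecDocument("openapi.json"),
	)
}
```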

View file

@@ -8,40 +8,65 @@ import (
"path"
)
// SwaggerUIOpts configures the Swaggerui middlewares
// SwaggerUIOpts configures the SwaggerUI middleware
type SwaggerUIOpts struct {
// BasePath for the UI path, defaults to: /
// BasePath for the API, defaults to: /
BasePath string
// Path combines with BasePath for the full UI path, defaults to: docs
// Path combines with BasePath to construct the path to the UI, defaults to: "docs".
Path string
// SpecURL the url to find the spec for
// SpecURL is the URL of the spec document.
//
// Defaults to: /swagger.json
SpecURL string
// Title for the documentation site, default to: API documentation
Title string
// Template specifies a custom template to serve the UI
Template string
// OAuthCallbackURL the url called after OAuth2 login
OAuthCallbackURL string
// The three components needed to embed swagger-ui
// SwaggerURL points to the js that generates the SwaggerUI site.
//
// Defaults to: https://unpkg.com/swagger-ui-dist/swagger-ui-bundle.js
SwaggerURL string
SwaggerPresetURL string
SwaggerStylesURL string
Favicon32 string
Favicon16 string
// Title for the documentation site, defaults to: API documentation
Title string
}
// EnsureDefaults in case some options are missing
func (r *SwaggerUIOpts) EnsureDefaults() {
if r.BasePath == "" {
r.BasePath = "/"
r.ensureDefaults()
if r.Template == "" {
r.Template = swaggeruiTemplate
}
if r.Path == "" {
r.Path = defaultDocsPath
}
if r.SpecURL == "" {
r.SpecURL = defaultDocsURL
func (r *SwaggerUIOpts) EnsureDefaultsOauth2() {
r.ensureDefaults()
if r.Template == "" {
r.Template = swaggerOAuthTemplate
}
}
func (r *SwaggerUIOpts) ensureDefaults() {
common := toCommonUIOptions(r)
common.EnsureDefaults()
fromCommonToAnyOptions(common, r)
// swaggerui-specifics
if r.OAuthCallbackURL == "" {
r.OAuthCallbackURL = path.Join(r.BasePath, r.Path, "oauth2-callback")
}
@@ -60,40 +85,22 @@ func (r *SwaggerUIOpts) EnsureDefaults() {
if r.Favicon32 == "" {
r.Favicon32 = swaggerFavicon32Latest
}
if r.Title == "" {
r.Title = defaultDocsTitle
}
}
// SwaggerUI creates a middleware to serve a documentation site for a swagger spec.
//
// This allows for altering the spec before starting the http listener.
func SwaggerUI(opts SwaggerUIOpts, next http.Handler) http.Handler {
opts.EnsureDefaults()
pth := path.Join(opts.BasePath, opts.Path)
tmpl := template.Must(template.New("swaggerui").Parse(swaggeruiTemplate))
buf := bytes.NewBuffer(nil)
_ = tmpl.Execute(buf, &opts)
b := buf.Bytes()
return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
if path.Join(r.URL.Path) == pth {
rw.Header().Set("Content-Type", "text/html; charset=utf-8")
rw.WriteHeader(http.StatusOK)
_, _ = rw.Write(b)
return
tmpl := template.Must(template.New("swaggerui").Parse(opts.Template))
assets := bytes.NewBuffer(nil)
if err := tmpl.Execute(assets, opts); err != nil {
panic(fmt.Errorf("cannot execute template: %w", err))
}
if next == nil {
rw.Header().Set("Content-Type", "text/plain")
rw.WriteHeader(http.StatusNotFound)
_, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth)))
return
}
next.ServeHTTP(rw, r)
})
return serveUI(pth, assets.Bytes(), next)
}
const (

View file

@@ -4,37 +4,20 @@ import (
"bytes"
"fmt"
"net/http"
"path"
"text/template"
)
func SwaggerUIOAuth2Callback(opts SwaggerUIOpts, next http.Handler) http.Handler {
opts.EnsureDefaults()
opts.EnsureDefaultsOauth2()
pth := opts.OAuthCallbackURL
tmpl := template.Must(template.New("swaggeroauth").Parse(swaggerOAuthTemplate))
buf := bytes.NewBuffer(nil)
_ = tmpl.Execute(buf, &opts)
b := buf.Bytes()
return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
if path.Join(r.URL.Path) == pth {
rw.Header().Set("Content-Type", "text/html; charset=utf-8")
rw.WriteHeader(http.StatusOK)
_, _ = rw.Write(b)
return
tmpl := template.Must(template.New("swaggeroauth").Parse(opts.Template))
assets := bytes.NewBuffer(nil)
if err := tmpl.Execute(assets, opts); err != nil {
panic(fmt.Errorf("cannot execute template: %w", err))
}
if next == nil {
rw.Header().Set("Content-Type", "text/plain")
rw.WriteHeader(http.StatusNotFound)
_, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth)))
return
}
next.ServeHTTP(rw, r)
})
return serveUI(pth, assets.Bytes(), next)
}
const (

View file

@@ -1,8 +0,0 @@
package middleware
const (
// constants that are common to all UI-serving middlewares
defaultDocsPath = "docs"
defaultDocsURL = "/swagger.json"
defaultDocsTitle = "API Documentation"
)

View file

@@ -0,0 +1,173 @@
package middleware
import (
"bytes"
"encoding/gob"
"fmt"
"net/http"
"path"
"strings"
)
const (
// constants that are common to all UI-serving middlewares
defaultDocsPath = "docs"
defaultDocsURL = "/swagger.json"
defaultDocsTitle = "API Documentation"
)
// uiOptions defines common options for UI serving middlewares.
type uiOptions struct {
// BasePath for the UI, defaults to: /
BasePath string
// Path combines with BasePath to construct the path to the UI, defaults to: "docs".
Path string
// SpecURL is the URL of the spec document.
//
// Defaults to: /swagger.json
SpecURL string
// Title for the documentation site, defaults to: API documentation
Title string
// Template specifies a custom template to serve the UI
Template string
}
// toCommonUIOptions converts any UI option type to retain the common options.
//
// This uses gob encoding/decoding to convert common fields from one struct to another.
func toCommonUIOptions(opts interface{}) uiOptions {
var buf bytes.Buffer
enc := gob.NewEncoder(&buf)
dec := gob.NewDecoder(&buf)
var o uiOptions
err := enc.Encode(opts)
if err != nil {
panic(err)
}
err = dec.Decode(&o)
if err != nil {
panic(err)
}
return o
}
func fromCommonToAnyOptions[T any](source uiOptions, target *T) {
var buf bytes.Buffer
enc := gob.NewEncoder(&buf)
dec := gob.NewDecoder(&buf)
err := enc.Encode(source)
if err != nil {
panic(err)
}
err = dec.Decode(target)
if err != nil {
panic(err)
}
}
// UIOption can be applied to UI serving middleware, such as Context.APIHandler or
// Context.APIHandlerSwaggerUI to alter the default behavior.
type UIOption func(*uiOptions)
func uiOptionsWithDefaults(opts []UIOption) uiOptions {
var o uiOptions
for _, apply := range opts {
apply(&o)
}
return o
}
// WithUIBasePath sets the base path from where to serve the UI assets.
//
// By default, Context middleware sets this value to the API base path.
func WithUIBasePath(base string) UIOption {
return func(o *uiOptions) {
if !strings.HasPrefix(base, "/") {
base = "/" + base
}
o.BasePath = base
}
}
// WithUIPath sets the path from where to serve the UI assets (i.e. /{basepath}/{path}).
func WithUIPath(pth string) UIOption {
return func(o *uiOptions) {
o.Path = pth
}
}
// WithUISpecURL sets the path from where to serve the swagger spec document.
//
// This may be specified as a full URL or a path.
//
// By default, this is "/swagger.json"
func WithUISpecURL(specURL string) UIOption {
return func(o *uiOptions) {
o.SpecURL = specURL
}
}
// WithUITitle sets the title of the UI.
//
// By default, Context middleware sets this value to the title found in the API spec.
func WithUITitle(title string) UIOption {
return func(o *uiOptions) {
o.Title = title
}
}
// WithTemplate allows setting a custom template for the UI.
//
// UI middleware will panic if the template does not parse or execute properly.
func WithTemplate(tpl string) UIOption {
return func(o *uiOptions) {
o.Template = tpl
}
}
// EnsureDefaults in case some options are missing
func (r *uiOptions) EnsureDefaults() {
if r.BasePath == "" {
r.BasePath = "/"
}
if r.Path == "" {
r.Path = defaultDocsPath
}
if r.SpecURL == "" {
r.SpecURL = defaultDocsURL
}
if r.Title == "" {
r.Title = defaultDocsTitle
}
}
// serveUI creates a middleware that serves a templated asset as text/html.
func serveUI(pth string, assets []byte, next http.Handler) http.Handler {
return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
if path.Clean(r.URL.Path) == pth {
rw.Header().Set(contentTypeHeader, "text/html; charset=utf-8")
rw.WriteHeader(http.StatusOK)
_, _ = rw.Write(assets)
return
}
if next != nil {
next.ServeHTTP(rw, r)
return
}
rw.Header().Set(contentTypeHeader, "text/plain")
rw.WriteHeader(http.StatusNotFound)
_, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth)))
})
}
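A sketch combining the UIOption helpers above with Context.APIHandler (the title and spec URL are placeholder values):

```go
package apiutil

import (
	"net/http"

	"github.com/go-openapi/runtime/middleware"
)

// docsHandler serves the API with a Redoc UI customized via UIOption.
func docsHandler(ctx *middleware.Context) http.Handler {
	return ctx.APIHandler(nil, // nil builder falls back to PassthroughBuilder
		middleware.WithUITitle("Example API"),
		middleware.WithUISpecURL("/api/spec/openapi.json"),
	)
}
```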

View file

@@ -35,7 +35,6 @@ type validation struct {
// ContentType validates the content type of a request
func validateContentType(allowed []string, actual string) error {
debugLog("validating content type for %q against [%s]", actual, strings.Join(allowed, ", "))
if len(allowed) == 0 {
return nil
}
@@ -57,13 +56,13 @@ }
}
func validateRequest(ctx *Context, request *http.Request, route *MatchedRoute) *validation {
debugLog("validating request %s %s", request.Method, request.URL.EscapedPath())
validate := &validation{
context: ctx,
request: request,
route: route,
bound: make(map[string]interface{}),
}
validate.debugLogf("validating request %s %s", request.Method, request.URL.EscapedPath())
validate.contentType()
if len(validate.result) == 0 {
@@ -76,8 +75,12 @@ func validateRequest(ctx *Context, request *http.Request, route *MatchedRoute) *
return validate
}
func (v *validation) debugLogf(format string, args ...any) {
v.context.debugLogf(format, args...)
}
func (v *validation) parameters() {
debugLog("validating request parameters for %s %s", v.request.Method, v.request.URL.EscapedPath())
v.debugLogf("validating request parameters for %s %s", v.request.Method, v.request.URL.EscapedPath())
if result := v.route.Binder.Bind(v.request, v.route.Params, v.route.Consumer, v.bound); result != nil {
if result.Error() == "validation failure list" {
for _, e := range result.(*errors.Validation).Value.([]interface{}) {
@@ -91,7 +94,7 @@ func (v *validation) parameters() {
func (v *validation) contentType() {
if len(v.result) == 0 && runtime.HasBody(v.request) {
debugLog("validating body content type for %s %s", v.request.Method, v.request.URL.EscapedPath())
v.debugLogf("validating body content type for %s %s", v.request.Method, v.request.URL.EscapedPath())
ct, _, req, err := v.context.ContentType(v.request)
if err != nil {
v.result = append(v.result, err)
@@ -100,6 +103,7 @@ }
}
if len(v.result) == 0 {
v.debugLogf("validating content type for %q against [%s]", ct, strings.Join(v.route.Consumes, ", "))
if err := validateContentType(v.route.Consumes, ct); err != nil {
v.result = append(v.result, err)
}

View file

@@ -29,3 +29,26 @@ The object model for OpenAPI specification documents.
> This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story.
>
> An early attempt to support Swagger 3 may be found at: https://github.com/go-openapi/spec3
* Does the unmarshaling support YAML?
> Not directly. The exposed types know only how to unmarshal from JSON.
>
> In order to load a YAML document as a Swagger spec, you need to use the loaders provided by
> github.com/go-openapi/loads
>
> Take a look at the example there: https://pkg.go.dev/github.com/go-openapi/loads#example-Spec
>
> See also https://github.com/go-openapi/spec/issues/164
* How can I validate a spec?
> Validation is provided by [the validate package](http://github.com/go-openapi/validate)
* Why do we have an `ID` field for `Schema` which is not part of the swagger spec?
> We found jsonschema compatibility more important, since `id` in jsonschema influences
> how `$ref`s are resolved.
> This `id` does not conflict with any property named `id`.
>
> See also https://github.com/go-openapi/spec/issues/23

View file

@@ -57,7 +57,7 @@ func ExpandSpec(spec *Swagger, options *ExpandOptions) error {
if !options.SkipSchemas {
for key, definition := range spec.Definitions {
parentRefs := make([]string, 0, 10)
parentRefs = append(parentRefs, fmt.Sprintf("#/definitions/%s", key))
parentRefs = append(parentRefs, "#/definitions/"+key)
def, err := expandSchema(definition, parentRefs, resolver, specBasePath)
if resolver.shouldStopOnError(err) {
@@ -213,9 +213,21 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba
}
if target.Ref.String() != "" {
if !resolver.options.SkipSchemas {
return expandSchemaRef(target, parentRefs, resolver, basePath)
}
// when "expand" with SkipSchema, we just rebase the existing $ref without replacing
// the full schema.
rebasedRef, err := NewRef(normalizeURI(target.Ref.String(), basePath))
if err != nil {
return nil, err
}
target.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID)
return &target, nil
}
for k := range target.Definitions {
tt, err := expandSchema(target.Definitions[k], parentRefs, resolver, basePath)
if resolver.shouldStopOnError(err) {
@@ -525,21 +537,25 @@ func getRefAndSchema(input interface{}) (*Ref, *Schema, error) {
}
func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePath string) error {
ref, _, err := getRefAndSchema(input)
ref, sch, err := getRefAndSchema(input)
if err != nil {
return err
}
if ref == nil {
if ref == nil && sch == nil { // nothing to do
return nil
}
parentRefs := make([]string, 0, 10)
if ref != nil {
// dereference this $ref
if err = resolver.deref(input, parentRefs, basePath); resolver.shouldStopOnError(err) {
return err
}
ref, sch, _ := getRefAndSchema(input)
ref, sch, _ = getRefAndSchema(input)
}
if ref.String() != "" {
transitiveResolver := resolver.transitiveResolver(basePath, *ref)
basePath = resolver.updateBasePath(transitiveResolver, basePath)
@@ -551,6 +567,7 @@ func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePa
if ref != nil {
*ref = Ref{}
}
return nil
}
@@ -560,38 +577,29 @@ func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePa
return ern
}
switch {
case resolver.isCircular(&rebasedRef, basePath, parentRefs...):
if resolver.isCircular(&rebasedRef, basePath, parentRefs...) {
// this is a circular $ref: stop expansion
if !resolver.options.AbsoluteCircularRef {
sch.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID)
} else {
sch.Ref = rebasedRef
}
case !resolver.options.SkipSchemas:
// schema expanded to a $ref in another root
sch.Ref = rebasedRef
debugLog("rebased to: %s", sch.Ref.String())
default:
// skip schema expansion but rebase $ref to schema
sch.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID)
}
}
// $ref expansion or rebasing is performed by expandSchema below
if ref != nil {
*ref = Ref{}
}
// expand schema
if !resolver.options.SkipSchemas {
// yes, we do it even if options.SkipSchemas is true: we have to go down that rabbit hole and rebase nested $refs
s, err := expandSchema(*sch, parentRefs, resolver, basePath)
if resolver.shouldStopOnError(err) {
return err
}
if s == nil {
// guard for when continuing on error
return nil
}
if s != nil { // guard for when continuing on error
*sch = *s
}
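For context, a minimal sketch of spec expansion with SkipSchemas, which now rebases schema $refs in place instead of dropping them ("swagger.json" is a placeholder path):

```go
package main

import (
	"log"

	"github.com/go-openapi/loads"
	"github.com/go-openapi/spec"
)

func main() {
	doc, err := loads.Spec("swagger.json")
	if err != nil {
		log.Fatal(err)
	}

	// With SkipSchemas, schema $refs are rebased rather than expanded.
	opts := &spec.ExpandOptions{RelativeBase: "swagger.json", SkipSchemas: true}
	if err := spec.ExpandSpec(doc.Spec(), opts); err != nil {
		log.Fatal(err)
	}
}
```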

View file

@@ -25,6 +25,7 @@ import (
"strings"
"github.com/asaskevich/govalidator"
"github.com/google/uuid"
"go.mongodb.org/mongo-driver/bson"
)
@@ -57,24 +58,35 @@ const (
// - long top-level domain names (e.g. example.london) are permitted
// - symbol unicode points are permitted (e.g. emoji) (not for top-level domain)
HostnamePattern = `^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$`
// UUIDPattern Regex for UUID that allows uppercase
UUIDPattern = `(?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}$`
// UUID3Pattern Regex for UUID3 that allows uppercase
UUID3Pattern = `(?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?3[0-9a-f]{3}-?[0-9a-f]{4}-?[0-9a-f]{12}$`
// UUID4Pattern Regex for UUID4 that allows uppercase
UUID4Pattern = `(?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?4[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$`
// UUID5Pattern Regex for UUID5 that allows uppercase
UUID5Pattern = `(?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?5[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$`
// json null type
jsonNull = "null"
)
const (
// UUIDPattern Regex for UUID that allows uppercase
//
// Deprecated: strfmt no longer uses regular expressions to validate UUIDs.
UUIDPattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$)|(^[0-9a-f]{32}$)`
// UUID3Pattern Regex for UUID3 that allows uppercase
//
// Deprecated: strfmt no longer uses regular expressions to validate UUIDs.
UUID3Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$)|(^[0-9a-f]{12}3[0-9a-f]{3}?[0-9a-f]{16}$)`
// UUID4Pattern Regex for UUID4 that allows uppercase
//
// Deprecated: strfmt no longer uses regular expressions to validate UUIDs.
UUID4Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$)|(^[0-9a-f]{12}4[0-9a-f]{3}[89ab][0-9a-f]{15}$)`
// UUID5Pattern Regex for UUID5 that allows uppercase
//
// Deprecated: strfmt no longer uses regular expressions to validate UUIDs.
UUID5Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$)|(^[0-9a-f]{12}5[0-9a-f]{3}[89ab][0-9a-f]{15}$)`
)
var (
rxHostname = regexp.MustCompile(HostnamePattern)
rxUUID = regexp.MustCompile(UUIDPattern)
rxUUID3 = regexp.MustCompile(UUID3Pattern)
rxUUID4 = regexp.MustCompile(UUID4Pattern)
rxUUID5 = regexp.MustCompile(UUID5Pattern)
)
// IsHostname returns true when the string is a valid hostname
@@ -99,24 +111,28 @@ func IsHostname(str string) bool {
return valid
}
// IsUUID returns true is the string matches a UUID, upper case is allowed
// IsUUID returns true if the string matches a UUID (in any version, including v6 and v7), upper case is allowed
func IsUUID(str string) bool {
return rxUUID.MatchString(str)
_, err := uuid.Parse(str)
return err == nil
}
// IsUUID3 returns true is the string matches a UUID, upper case is allowed
// IsUUID3 returns true if the string matches a UUID v3, upper case is allowed
func IsUUID3(str string) bool {
return rxUUID3.MatchString(str)
id, err := uuid.Parse(str)
return err == nil && id.Version() == uuid.Version(3)
}
// IsUUID4 returns true is the string matches a UUID, upper case is allowed
// IsUUID4 returns true if the string matches a UUID v4, upper case is allowed
func IsUUID4(str string) bool {
return rxUUID4.MatchString(str)
id, err := uuid.Parse(str)
return err == nil && id.Version() == uuid.Version(4)
}
// IsUUID5 returns true is the string matches a UUID, upper case is allowed
// IsUUID5 returns true if the string matches a UUID v5, upper case is allowed
func IsUUID5(str string) bool {
return rxUUID5.MatchString(str)
id, err := uuid.Parse(str)
return err == nil && id.Version() == uuid.Version(5)
}
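Since validation now delegates to github.com/google/uuid instead of the regular expressions above, a quick sketch of the resulting behavior (the UUID values are arbitrary examples):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
)

func main() {
	// Any RFC 4122 version parses now, including v7.
	fmt.Println(strfmt.IsUUID("018f6f2c-b125-7cd3-9a9e-2f0c3d5e6a7b")) // true

	// Version-specific checks inspect the parsed version bits.
	id := "4d3c2b1a-0000-4fff-8fff-0123456789ab"
	fmt.Println(strfmt.IsUUID4(id)) // true
	fmt.Println(strfmt.IsUUID5(id)) // false
}
```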
// IsEmail validates an email address.

View file

@@ -16,6 +16,7 @@ package strfmt
import (
"encoding"
stderrors "errors"
"fmt"
"reflect"
"strings"
@ -117,7 +118,7 @@ func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc {
case "datetime":
input := data
if len(input) == 0 {
return nil, fmt.Errorf("empty string is an invalid datetime format")
return nil, stderrors.New("empty string is an invalid datetime format")
}
return ParseDateTime(input)
case "duration":

vendor/github.com/go-openapi/swag/BENCHMARK.md generated vendored Normal file
View file

@@ -0,0 +1,52 @@
# Benchmarks
## Name mangling utilities
```bash
go test -bench XXX -run XXX -benchtime 30s
```
### Benchmarks at b3e7a5386f996177e4808f11acb2aa93a0f660df
```
goos: linux
goarch: amd64
pkg: github.com/go-openapi/swag
cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz
BenchmarkToXXXName/ToGoName-4 862623 44101 ns/op 10450 B/op 732 allocs/op
BenchmarkToXXXName/ToVarName-4 853656 40728 ns/op 10468 B/op 734 allocs/op
BenchmarkToXXXName/ToFileName-4 1268312 27813 ns/op 9785 B/op 617 allocs/op
BenchmarkToXXXName/ToCommandName-4 1276322 27903 ns/op 9785 B/op 617 allocs/op
BenchmarkToXXXName/ToHumanNameLower-4 895334 40354 ns/op 10472 B/op 731 allocs/op
BenchmarkToXXXName/ToHumanNameTitle-4 882441 40678 ns/op 10566 B/op 749 allocs/op
```
### Benchmarks after PR #79
Roughly a 10× performance improvement and about 100× fewer memory allocations.
```
goos: linux
goarch: amd64
pkg: github.com/go-openapi/swag
cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz
BenchmarkToXXXName/ToGoName-4 9595830 3991 ns/op 42 B/op 5 allocs/op
BenchmarkToXXXName/ToVarName-4 9194276 3984 ns/op 62 B/op 7 allocs/op
BenchmarkToXXXName/ToFileName-4 17002711 2123 ns/op 147 B/op 7 allocs/op
BenchmarkToXXXName/ToCommandName-4 16772926 2111 ns/op 147 B/op 7 allocs/op
BenchmarkToXXXName/ToHumanNameLower-4 9788331 3749 ns/op 92 B/op 6 allocs/op
BenchmarkToXXXName/ToHumanNameTitle-4 9188260 3941 ns/op 104 B/op 6 allocs/op
```
```
goos: linux
goarch: amd64
pkg: github.com/go-openapi/swag
cpu: AMD Ryzen 7 5800X 8-Core Processor
BenchmarkToXXXName/ToGoName-16 18527378 1972 ns/op 42 B/op 5 allocs/op
BenchmarkToXXXName/ToVarName-16 15552692 2093 ns/op 62 B/op 7 allocs/op
BenchmarkToXXXName/ToFileName-16 32161176 1117 ns/op 147 B/op 7 allocs/op
BenchmarkToXXXName/ToCommandName-16 32256634 1137 ns/op 147 B/op 7 allocs/op
BenchmarkToXXXName/ToHumanNameLower-16 18599661 1946 ns/op 92 B/op 6 allocs/op
BenchmarkToXXXName/ToHumanNameTitle-16 17581353 2054 ns/op 105 B/op 6 allocs/op
```

View file

@@ -16,9 +16,130 @@ package swag
import (
"sort"
"strings"
"sync"
)
var (
// commonInitialisms are common acronyms that are kept as whole uppercased words.
commonInitialisms *indexOfInitialisms
// initialisms is a slice of sorted initialisms
initialisms []string
// a copy of initialisms pre-baked as []rune
initialismsRunes [][]rune
initialismsUpperCased [][]rune
isInitialism func(string) bool
maxAllocMatches int
)
func init() {
// Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769
configuredInitialisms := map[string]bool{
"ACL": true,
"API": true,
"ASCII": true,
"CPU": true,
"CSS": true,
"DNS": true,
"EOF": true,
"GUID": true,
"HTML": true,
"HTTPS": true,
"HTTP": true,
"ID": true,
"IP": true,
"IPv4": true,
"IPv6": true,
"JSON": true,
"LHS": true,
"OAI": true,
"QPS": true,
"RAM": true,
"RHS": true,
"RPC": true,
"SLA": true,
"SMTP": true,
"SQL": true,
"SSH": true,
"TCP": true,
"TLS": true,
"TTL": true,
"UDP": true,
"UI": true,
"UID": true,
"UUID": true,
"URI": true,
"URL": true,
"UTF8": true,
"VM": true,
"XML": true,
"XMPP": true,
"XSRF": true,
"XSS": true,
}
// a thread-safe index of initialisms
commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms)
initialisms = commonInitialisms.sorted()
initialismsRunes = asRunes(initialisms)
initialismsUpperCased = asUpperCased(initialisms)
maxAllocMatches = maxAllocHeuristic(initialismsRunes)
// a test function
isInitialism = commonInitialisms.isInitialism
}
func asRunes(in []string) [][]rune {
out := make([][]rune, len(in))
for i, initialism := range in {
out[i] = []rune(initialism)
}
return out
}
func asUpperCased(in []string) [][]rune {
out := make([][]rune, len(in))
for i, initialism := range in {
out[i] = []rune(upper(trim(initialism)))
}
return out
}
func maxAllocHeuristic(in [][]rune) int {
heuristic := make(map[rune]int)
for _, initialism := range in {
heuristic[initialism[0]]++
}
var maxAlloc int
for _, val := range heuristic {
if val > maxAlloc {
maxAlloc = val
}
}
return maxAlloc
}
// AddInitialisms adds additional initialisms
func AddInitialisms(words ...string) {
for _, word := range words {
// commonInitialisms[upper(word)] = true
commonInitialisms.add(upper(word))
}
// sort again
initialisms = commonInitialisms.sorted()
initialismsRunes = asRunes(initialisms)
initialismsUpperCased = asUpperCased(initialisms)
}
// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms.
// Since go1.9, this may be implemented with sync.Map.
type indexOfInitialisms struct {
@ -55,7 +176,7 @@ func (m *indexOfInitialisms) add(key string) *indexOfInitialisms {
func (m *indexOfInitialisms) sorted() (result []string) {
m.sortMutex.Lock()
defer m.sortMutex.Unlock()
m.index.Range(func(key, value interface{}) bool {
m.index.Range(func(key, _ interface{}) bool {
k := key.(string)
result = append(result, k)
return true
@ -63,3 +184,19 @@ func (m *indexOfInitialisms) sorted() (result []string) {
sort.Sort(sort.Reverse(byInitialism(result)))
return
}
type byInitialism []string
func (s byInitialism) Len() int {
return len(s)
}
func (s byInitialism) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s byInitialism) Less(i, j int) bool {
if len(s[i]) != len(s[j]) {
return len(s[i]) < len(s[j])
}
return strings.Compare(s[i], s[j]) > 0
}
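// note (illustrative): with sort.Reverse(byInitialism(result)) above, sorted()
// yields longer initialisms first (e.g. "HTTPS" before "HTTP"), so that the
// splitter always tries the longest possible match first.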


@ -14,74 +14,80 @@
package swag
import "unicode"
import (
"unicode"
"unicode/utf8"
)
type (
nameLexem interface {
GetUnsafeGoName() string
GetOriginal() string
IsInitialism() bool
}
lexemKind uint8
initialismNameLexem struct {
original string
matchedInitialism string
}
casualNameLexem struct {
original string
}
nameLexem struct {
original string
matchedInitialism string
kind lexemKind
}
)
func newInitialismNameLexem(original, matchedInitialism string) *initialismNameLexem {
return &initialismNameLexem{
const (
lexemKindCasualName lexemKind = iota
lexemKindInitialismName
)
func newInitialismNameLexem(original, matchedInitialism string) nameLexem {
return nameLexem{
kind: lexemKindInitialismName,
original: original,
matchedInitialism: matchedInitialism,
}
}
func newCasualNameLexem(original string) *casualNameLexem {
return &casualNameLexem{
func newCasualNameLexem(original string) nameLexem {
return nameLexem{
kind: lexemKindCasualName,
original: original,
}
}
func (l *initialismNameLexem) GetUnsafeGoName() string {
return l.matchedInitialism
}
func (l *casualNameLexem) GetUnsafeGoName() string {
func (l nameLexem) GetUnsafeGoName() string {
if l.kind == lexemKindInitialismName {
return l.matchedInitialism
}
var first rune
var rest string
var (
first rune
rest string
)
for i, orig := range l.original {
if i == 0 {
first = orig
continue
}
if i > 0 {
rest = l.original[i:]
break
}
}
if len(l.original) > 1 {
return string(unicode.ToUpper(first)) + lower(rest)
b := poolOfBuffers.BorrowBuffer(utf8.UTFMax + len(rest))
defer func() {
poolOfBuffers.RedeemBuffer(b)
}()
b.WriteRune(unicode.ToUpper(first))
b.WriteString(lower(rest))
return b.String()
}
return l.original
}
func (l *initialismNameLexem) GetOriginal() string {
func (l nameLexem) GetOriginal() string {
return l.original
}
func (l *casualNameLexem) GetOriginal() string {
return l.original
}
func (l *initialismNameLexem) IsInitialism() bool {
return true
}
func (l *casualNameLexem) IsInitialism() bool {
return false
func (l nameLexem) IsInitialism() bool {
return l.kind == lexemKindInitialismName
}


@ -15,95 +15,239 @@
package swag
import (
"bytes"
"sync"
"unicode"
"unicode/utf8"
)
var nameReplaceTable = map[rune]string{
'@': "At ",
'&': "And ",
'|': "Pipe ",
'$': "Dollar ",
'!': "Bang ",
'-': "",
'_': "",
}
type (
splitter struct {
postSplitInitialismCheck bool
initialisms []string
initialismsRunes [][]rune
initialismsUpperCased [][]rune // initialisms cached in their trimmed, upper-cased version
postSplitInitialismCheck bool
}
splitterOption func(*splitter) *splitter
splitterOption func(*splitter)
initialismMatch struct {
body []rune
start, end int
complete bool
}
initialismMatches []initialismMatch
)
// split calls the splitter; splitter provides more control and post options
func split(str string) []string {
lexems := newSplitter().split(str)
result := make([]string, 0, len(lexems))
for _, lexem := range lexems {
type (
// memory pools of temporary objects.
//
// These are used to recycle temporarily allocated objects
// and relieve the GC from undue pressure.
matchesPool struct {
*sync.Pool
}
buffersPool struct {
*sync.Pool
}
lexemsPool struct {
*sync.Pool
}
splittersPool struct {
*sync.Pool
}
)
var (
// poolOfMatches holds temporary slices for recycling during the initialism match process
poolOfMatches = matchesPool{
Pool: &sync.Pool{
New: func() any {
s := make(initialismMatches, 0, maxAllocMatches)
return &s
},
},
}
poolOfBuffers = buffersPool{
Pool: &sync.Pool{
New: func() any {
return new(bytes.Buffer)
},
},
}
poolOfLexems = lexemsPool{
Pool: &sync.Pool{
New: func() any {
s := make([]nameLexem, 0, maxAllocMatches)
return &s
},
},
}
poolOfSplitters = splittersPool{
Pool: &sync.Pool{
New: func() any {
s := newSplitter()
return &s
},
},
}
)
// nameReplaceTable finds a word representation for special characters.
func nameReplaceTable(r rune) (string, bool) {
switch r {
case '@':
return "At ", true
case '&':
return "And ", true
case '|':
return "Pipe ", true
case '$':
return "Dollar ", true
case '!':
return "Bang ", true
case '-':
return "", true
case '_':
return "", true
default:
return "", false
}
}
// split calls the splitter.
//
// Use newSplitter for more control and options
func split(str string) []string {
s := poolOfSplitters.BorrowSplitter()
lexems := s.split(str)
result := make([]string, 0, len(*lexems))
for _, lexem := range *lexems {
result = append(result, lexem.GetOriginal())
}
poolOfLexems.RedeemLexems(lexems)
poolOfSplitters.RedeemSplitter(s)
return result
}
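// usage sketch (illustrative, assuming the default initialisms table):
//
//	split("HTTPServer") // expected: ["HTTP", "Server"]
//	split("a_b")        // expected: ["a", "b"] ('_' acts as a separator)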
func (s *splitter) split(str string) []nameLexem {
return s.toNameLexems(str)
}
func newSplitter(options ...splitterOption) *splitter {
splitter := &splitter{
func newSplitter(options ...splitterOption) splitter {
s := splitter{
postSplitInitialismCheck: false,
initialisms: initialisms,
initialismsRunes: initialismsRunes,
initialismsUpperCased: initialismsUpperCased,
}
for _, option := range options {
splitter = option(splitter)
option(&s)
}
return splitter
}
// withPostSplitInitialismCheck allows catching initialisms after the main split process
func withPostSplitInitialismCheck(s *splitter) *splitter {
s.postSplitInitialismCheck = true
return s
}
type (
initialismMatch struct {
start, end int
body []rune
complete bool
}
initialismMatches []*initialismMatch
)
// withPostSplitInitialismCheck allows catching initialisms after the main split process
func withPostSplitInitialismCheck(s *splitter) {
s.postSplitInitialismCheck = true
}
func (s *splitter) toNameLexems(name string) []nameLexem {
func (p matchesPool) BorrowMatches() *initialismMatches {
s := p.Get().(*initialismMatches)
*s = (*s)[:0] // reset slice, keep allocated capacity
return s
}
func (p buffersPool) BorrowBuffer(size int) *bytes.Buffer {
s := p.Get().(*bytes.Buffer)
s.Reset()
if s.Cap() < size {
s.Grow(size)
}
return s
}
func (p lexemsPool) BorrowLexems() *[]nameLexem {
s := p.Get().(*[]nameLexem)
*s = (*s)[:0] // reset slice, keep allocated capacity
return s
}
func (p splittersPool) BorrowSplitter(options ...splitterOption) *splitter {
s := p.Get().(*splitter)
s.postSplitInitialismCheck = false // reset options
for _, apply := range options {
apply(s)
}
return s
}
func (p matchesPool) RedeemMatches(s *initialismMatches) {
p.Put(s)
}
func (p buffersPool) RedeemBuffer(s *bytes.Buffer) {
p.Put(s)
}
func (p lexemsPool) RedeemLexems(s *[]nameLexem) {
p.Put(s)
}
func (p splittersPool) RedeemSplitter(s *splitter) {
p.Put(s)
}
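// minimal borrow/redeem sketch (illustrative), mirroring how callers use these pools:
//
//	buf := poolOfBuffers.BorrowBuffer(64)
//	defer poolOfBuffers.RedeemBuffer(buf)
//	buf.WriteString("HTTPServer")
//	_ = buf.String()
//
// the Borrow* helpers reset recycled objects before handing them out,
// so no stale state leaks between uses.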
func (m initialismMatch) isZero() bool {
return m.start == 0 && m.end == 0
}
func (s splitter) split(name string) *[]nameLexem {
nameRunes := []rune(name)
matches := s.gatherInitialismMatches(nameRunes)
if matches == nil {
return poolOfLexems.BorrowLexems()
}
return s.mapMatchesToNameLexems(nameRunes, matches)
}
func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches {
matches := make(initialismMatches, 0)
func (s splitter) gatherInitialismMatches(nameRunes []rune) *initialismMatches {
var matches *initialismMatches
for currentRunePosition, currentRune := range nameRunes {
newMatches := make(initialismMatches, 0, len(matches))
// recycle these allocations as we loop over runes:
// with such recycling, only 2 slices should be allocated per call
// instead of O(n).
newMatches := poolOfMatches.BorrowMatches()
// check current initialism matches
for _, match := range matches {
if matches != nil { // skip first iteration
for _, match := range *matches {
if keepCompleteMatch := match.complete; keepCompleteMatch {
newMatches = append(newMatches, match)
*newMatches = append(*newMatches, match)
continue
}
// drop failed match
currentMatchRune := match.body[currentRunePosition-match.start]
if !s.initialismRuneEqual(currentMatchRune, currentRune) {
if currentMatchRune != currentRune {
continue
}
@ -125,14 +269,15 @@ func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches {
match.end = currentRunePosition
}
newMatches = append(newMatches, match)
*newMatches = append(*newMatches, match)
}
}
// check for new initialism matches
for _, initialism := range s.initialisms {
initialismRunes := []rune(initialism)
if s.initialismRuneEqual(initialismRunes[0], currentRune) {
newMatches = append(newMatches, &initialismMatch{
for i := range s.initialisms {
initialismRunes := s.initialismsRunes[i]
if initialismRunes[0] == currentRune {
*newMatches = append(*newMatches, initialismMatch{
start: currentRunePosition,
body: initialismRunes,
complete: false,
@ -140,24 +285,28 @@ func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches {
}
}
if matches != nil {
poolOfMatches.RedeemMatches(matches)
}
matches = newMatches
}
// up to the caller to redeem this last slice
return matches
}
func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMatches) []nameLexem {
nameLexems := make([]nameLexem, 0)
func (s splitter) mapMatchesToNameLexems(nameRunes []rune, matches *initialismMatches) *[]nameLexem {
nameLexems := poolOfLexems.BorrowLexems()
var lastAcceptedMatch *initialismMatch
for _, match := range matches {
var lastAcceptedMatch initialismMatch
for _, match := range *matches {
if !match.complete {
continue
}
if firstMatch := lastAcceptedMatch == nil; firstMatch {
nameLexems = append(nameLexems, s.breakCasualString(nameRunes[:match.start])...)
nameLexems = append(nameLexems, s.breakInitialism(string(match.body)))
if firstMatch := lastAcceptedMatch.isZero(); firstMatch {
s.appendBrokenDownCasualString(nameLexems, nameRunes[:match.start])
*nameLexems = append(*nameLexems, s.breakInitialism(string(match.body)))
lastAcceptedMatch = match
@ -169,63 +318,66 @@ func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMa
}
middle := nameRunes[lastAcceptedMatch.end+1 : match.start]
nameLexems = append(nameLexems, s.breakCasualString(middle)...)
nameLexems = append(nameLexems, s.breakInitialism(string(match.body)))
s.appendBrokenDownCasualString(nameLexems, middle)
*nameLexems = append(*nameLexems, s.breakInitialism(string(match.body)))
lastAcceptedMatch = match
}
// we have not found any accepted matches
if lastAcceptedMatch == nil {
return s.breakCasualString(nameRunes)
if lastAcceptedMatch.isZero() {
*nameLexems = (*nameLexems)[:0]
s.appendBrokenDownCasualString(nameLexems, nameRunes)
} else if lastAcceptedMatch.end+1 != len(nameRunes) {
rest := nameRunes[lastAcceptedMatch.end+1:]
s.appendBrokenDownCasualString(nameLexems, rest)
}
if lastAcceptedMatch.end+1 != len(nameRunes) {
rest := nameRunes[lastAcceptedMatch.end+1:]
nameLexems = append(nameLexems, s.breakCasualString(rest)...)
}
poolOfMatches.RedeemMatches(matches)
return nameLexems
}
func (s *splitter) initialismRuneEqual(a, b rune) bool {
return a == b
}
func (s *splitter) breakInitialism(original string) nameLexem {
func (s splitter) breakInitialism(original string) nameLexem {
return newInitialismNameLexem(original, original)
}
func (s *splitter) breakCasualString(str []rune) []nameLexem {
segments := make([]nameLexem, 0)
currentSegment := ""
func (s splitter) appendBrokenDownCasualString(segments *[]nameLexem, str []rune) {
currentSegment := poolOfBuffers.BorrowBuffer(len(str)) // unlike strings.Builder, a bytes.Buffer's initial storage can be reused
defer func() {
poolOfBuffers.RedeemBuffer(currentSegment)
}()
addCasualNameLexem := func(original string) {
segments = append(segments, newCasualNameLexem(original))
*segments = append(*segments, newCasualNameLexem(original))
}
addInitialismNameLexem := func(original, match string) {
segments = append(segments, newInitialismNameLexem(original, match))
*segments = append(*segments, newInitialismNameLexem(original, match))
}
addNameLexem := func(original string) {
if s.postSplitInitialismCheck {
for _, initialism := range s.initialisms {
if upper(initialism) == upper(original) {
addInitialismNameLexem(original, initialism)
return
}
}
}
addCasualNameLexem(original)
}
var addNameLexem func(string)
if s.postSplitInitialismCheck {
addNameLexem = func(original string) {
for i := range s.initialisms {
if isEqualFoldIgnoreSpace(s.initialismsUpperCased[i], original) {
addInitialismNameLexem(original, s.initialisms[i])
return
}
}
addCasualNameLexem(original)
}
} else {
addNameLexem = addCasualNameLexem
}
for _, rn := range string(str) {
if replace, found := nameReplaceTable[rn]; found {
if currentSegment != "" {
addNameLexem(currentSegment)
currentSegment = ""
for _, rn := range str {
if replace, found := nameReplaceTable(rn); found {
if currentSegment.Len() > 0 {
addNameLexem(currentSegment.String())
currentSegment.Reset()
}
if replace != "" {
@ -236,27 +388,121 @@ func (s *splitter) breakCasualString(str []rune) []nameLexem {
}
if !unicode.In(rn, unicode.L, unicode.M, unicode.N, unicode.Pc) {
if currentSegment != "" {
addNameLexem(currentSegment)
currentSegment = ""
if currentSegment.Len() > 0 {
addNameLexem(currentSegment.String())
currentSegment.Reset()
}
continue
}
if unicode.IsUpper(rn) {
if currentSegment != "" {
addNameLexem(currentSegment)
if currentSegment.Len() > 0 {
addNameLexem(currentSegment.String())
}
currentSegment = ""
currentSegment.Reset()
}
currentSegment += string(rn)
currentSegment.WriteRune(rn)
}
if currentSegment != "" {
addNameLexem(currentSegment)
if currentSegment.Len() > 0 {
addNameLexem(currentSegment.String())
}
}
return segments
// isEqualFoldIgnoreSpace is the same as strings.EqualFold, but
// it ignores leading and trailing blank spaces in the compared
// string.
//
// base is assumed to be composed of upper-cased runes, and be already
// trimmed.
//
// This code is heavily inspired by strings.EqualFold.
func isEqualFoldIgnoreSpace(base []rune, str string) bool {
var i, baseIndex int
// equivalent to b := []byte(str), but without data copy
b := hackStringBytes(str)
for i < len(b) {
if c := b[i]; c < utf8.RuneSelf {
// fast path for ASCII
if c != ' ' && c != '\t' {
break
}
i++
continue
}
// unicode case
r, size := utf8.DecodeRune(b[i:])
if !unicode.IsSpace(r) {
break
}
i += size
}
if i >= len(b) {
return len(base) == 0
}
for _, baseRune := range base {
if i >= len(b) {
break
}
if c := b[i]; c < utf8.RuneSelf {
// single byte rune case (ASCII)
if baseRune >= utf8.RuneSelf {
return false
}
baseChar := byte(baseRune)
if c != baseChar &&
!('a' <= c && c <= 'z' && c-'a'+'A' == baseChar) {
return false
}
baseIndex++
i++
continue
}
// unicode case
r, size := utf8.DecodeRune(b[i:])
if unicode.ToUpper(r) != baseRune {
return false
}
baseIndex++
i += size
}
if baseIndex != len(base) {
return false
}
// all passed: now we should only have blanks
for i < len(b) {
if c := b[i]; c < utf8.RuneSelf {
// fast path for ASCII
if c != ' ' && c != '\t' {
return false
}
i++
continue
}
// unicode case
r, size := utf8.DecodeRune(b[i:])
if !unicode.IsSpace(r) {
return false
}
i += size
}
return true
}
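// illustrative examples, assuming base is upper-cased and trimmed per the contract above:
//
//	isEqualFoldIgnoreSpace([]rune("HTTP"), " http ") // true: case and surrounding blanks are ignored
//	isEqualFoldIgnoreSpace([]rune("HTTP"), "https")  // false: trailing non-blank rune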

vendor/github.com/go-openapi/swag/string_bytes.go generated vendored Normal file

@ -0,0 +1,8 @@
package swag
import "unsafe"
// hackStringBytes returns the (unsafe) underlying bytes slice of a string.
func hackStringBytes(str string) []byte {
return unsafe.Slice(unsafe.StringData(str), len(str))
}
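// the returned slice aliases the string's backing array; since strings are
// immutable, callers must treat it as read-only (illustrative):
//
//	b := hackStringBytes("HTTP")
//	_ = b[0]  // reading is fine
//	// b[0] = 'h' // mutating would be undefined behavior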


@ -18,76 +18,25 @@ import (
"reflect"
"strings"
"unicode"
"unicode/utf8"
)
// commonInitialisms are common acronyms that are kept as whole uppercased words.
var commonInitialisms *indexOfInitialisms
// initialisms is a slice of sorted initialisms
var initialisms []string
var isInitialism func(string) bool
// GoNamePrefixFunc sets an optional rule to prefix go names
// which do not start with a letter.
//
// The prefix function is assumed to return a string that starts with an upper case letter.
//
// e.g. to help convert "123" into "{prefix}123"
//
// The default is to prefix with "X"
var GoNamePrefixFunc func(string) string
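// configuration sketch (hypothetical caller code, not part of this change):
//
//	swag.GoNamePrefixFunc = func(name string) string {
//		// the result is assumed to start with an upper-case letter
//		return "Go"
//	}
//
// ToGoName("123") would then yield "Go123" instead of the default "X123".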
func init() {
// Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769
var configuredInitialisms = map[string]bool{
"ACL": true,
"API": true,
"ASCII": true,
"CPU": true,
"CSS": true,
"DNS": true,
"EOF": true,
"GUID": true,
"HTML": true,
"HTTPS": true,
"HTTP": true,
"ID": true,
"IP": true,
"IPv4": true,
"IPv6": true,
"JSON": true,
"LHS": true,
"OAI": true,
"QPS": true,
"RAM": true,
"RHS": true,
"RPC": true,
"SLA": true,
"SMTP": true,
"SQL": true,
"SSH": true,
"TCP": true,
"TLS": true,
"TTL": true,
"UDP": true,
"UI": true,
"UID": true,
"UUID": true,
"URI": true,
"URL": true,
"UTF8": true,
"VM": true,
"XML": true,
"XMPP": true,
"XSRF": true,
"XSS": true,
}
// a thread-safe index of initialisms
commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms)
initialisms = commonInitialisms.sorted()
// a test function
isInitialism = commonInitialisms.isInitialism
}
func prefixFunc(name, in string) string {
if GoNamePrefixFunc == nil {
return "X" + in
}
return GoNamePrefixFunc(name) + in
}
const (
@ -156,25 +105,9 @@ func SplitByFormat(data, format string) []string {
return result
}
type byInitialism []string
func (s byInitialism) Len() int {
return len(s)
}
func (s byInitialism) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s byInitialism) Less(i, j int) bool {
if len(s[i]) != len(s[j]) {
return len(s[i]) < len(s[j])
}
return strings.Compare(s[i], s[j]) > 0
}
// Removes leading and trailing whitespace
func trim(str string) string {
return strings.Trim(str, " ")
return strings.TrimSpace(str)
}
// Shortcut to strings.ToUpper()
@ -188,15 +121,20 @@ func lower(str string) string {
}
// Camelize an uppercased word
func Camelize(word string) (camelized string) {
func Camelize(word string) string {
camelized := poolOfBuffers.BorrowBuffer(len(word))
defer func() {
poolOfBuffers.RedeemBuffer(camelized)
}()
for pos, ru := range []rune(word) {
if pos > 0 {
camelized += string(unicode.ToLower(ru))
camelized.WriteRune(unicode.ToLower(ru))
} else {
camelized += string(unicode.ToUpper(ru))
camelized.WriteRune(unicode.ToUpper(ru))
}
}
return
return camelized.String()
}
// ToFileName lowercases and underscores a go type name
@ -224,33 +162,40 @@ func ToCommandName(name string) string {
// ToHumanNameLower represents a code name as a human series of words
func ToHumanNameLower(name string) string {
in := newSplitter(withPostSplitInitialismCheck).split(name)
out := make([]string, 0, len(in))
s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
in := s.split(name)
poolOfSplitters.RedeemSplitter(s)
out := make([]string, 0, len(*in))
for _, w := range in {
for _, w := range *in {
if !w.IsInitialism() {
out = append(out, lower(w.GetOriginal()))
} else {
out = append(out, w.GetOriginal())
out = append(out, trim(w.GetOriginal()))
}
}
poolOfLexems.RedeemLexems(in)
return strings.Join(out, " ")
}
// ToHumanNameTitle represents a code name as a human series of words with the first letters titleized
func ToHumanNameTitle(name string) string {
in := newSplitter(withPostSplitInitialismCheck).split(name)
s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
in := s.split(name)
poolOfSplitters.RedeemSplitter(s)
out := make([]string, 0, len(in))
for _, w := range in {
original := w.GetOriginal()
out := make([]string, 0, len(*in))
for _, w := range *in {
original := trim(w.GetOriginal())
if !w.IsInitialism() {
out = append(out, Camelize(original))
} else {
out = append(out, original)
}
}
poolOfLexems.RedeemLexems(in)
return strings.Join(out, " ")
}
@ -264,7 +209,7 @@ func ToJSONName(name string) string {
out = append(out, lower(w))
continue
}
out = append(out, Camelize(w))
out = append(out, Camelize(trim(w)))
}
return strings.Join(out, "")
}
@ -283,35 +228,70 @@ func ToVarName(name string) string {
// ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes
func ToGoName(name string) string {
lexems := newSplitter(withPostSplitInitialismCheck).split(name)
s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
lexems := s.split(name)
poolOfSplitters.RedeemSplitter(s)
defer func() {
poolOfLexems.RedeemLexems(lexems)
}()
lexemes := *lexems
result := ""
for _, lexem := range lexems {
if len(lexemes) == 0 {
return ""
}
result := poolOfBuffers.BorrowBuffer(len(name))
defer func() {
poolOfBuffers.RedeemBuffer(result)
}()
// check if not starting with a letter, upper case
firstPart := lexemes[0].GetUnsafeGoName()
if lexemes[0].IsInitialism() {
firstPart = upper(firstPart)
}
if c := firstPart[0]; c < utf8.RuneSelf {
// ASCII
switch {
case 'A' <= c && c <= 'Z':
result.WriteString(firstPart)
case 'a' <= c && c <= 'z':
result.WriteByte(c - 'a' + 'A')
result.WriteString(firstPart[1:])
default:
result.WriteString(prefixFunc(name, firstPart))
// NOTE: no longer check if prefixFunc returns a string that starts with uppercase:
// assume this is always the case
}
} else {
// unicode
firstRune, _ := utf8.DecodeRuneInString(firstPart)
switch {
case !unicode.IsLetter(firstRune):
result.WriteString(prefixFunc(name, firstPart))
case !unicode.IsUpper(firstRune):
result.WriteString(prefixFunc(name, firstPart))
/*
result.WriteRune(unicode.ToUpper(firstRune))
result.WriteString(firstPart[offset:])
*/
default:
result.WriteString(firstPart)
}
}
for _, lexem := range lexemes[1:] {
goName := lexem.GetUnsafeGoName()
// to support old behavior
if lexem.IsInitialism() {
goName = upper(goName)
}
result += goName
result.WriteString(goName)
}
if len(result) > 0 {
// Only prefix with X when the first character isn't an ascii letter
first := []rune(result)[0]
if !unicode.IsLetter(first) || (first > unicode.MaxASCII && !unicode.IsUpper(first)) {
if GoNamePrefixFunc == nil {
return "X" + result
}
result = GoNamePrefixFunc(name) + result
}
first = []rune(result)[0]
if unicode.IsLetter(first) && !unicode.IsUpper(first) {
result = string(append([]rune{unicode.ToUpper(first)}, []rune(result)[1:]...))
}
}
return result
return result.String()
}
// ContainsStrings searches a slice of strings for a case-sensitive match
@ -376,16 +356,6 @@ func IsZero(data interface{}) bool {
}
}
// AddInitialisms add additional initialisms
func AddInitialisms(words ...string) {
for _, word := range words {
// commonInitialisms[upper(word)] = true
commonInitialisms.add(upper(word))
}
// sort again
initialisms = commonInitialisms.sorted()
}
// CommandLineOptionsGroup represents a group of user-defined command line options
type CommandLineOptionsGroup struct {
ShortDescription string


@ -16,8 +16,11 @@ package swag
import (
"encoding/json"
"errors"
"fmt"
"path/filepath"
"reflect"
"sort"
"strconv"
"github.com/mailru/easyjson/jlexer"
@ -48,7 +51,7 @@ func BytesToYAMLDoc(data []byte) (interface{}, error) {
return nil, err
}
if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode {
return nil, fmt.Errorf("only YAML documents that are objects are supported")
return nil, errors.New("only YAML documents that are objects are supported")
}
return &document, nil
}
@ -245,7 +248,27 @@ func (s JSONMapSlice) MarshalYAML() (interface{}, error) {
return yaml.Marshal(&n)
}
func isNil(input interface{}) bool {
if input == nil {
return true
}
kind := reflect.TypeOf(input).Kind()
switch kind { //nolint:exhaustive
case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan:
return reflect.ValueOf(input).IsNil()
default:
return false
}
}
func json2yaml(item interface{}) (*yaml.Node, error) {
if isNil(item) {
return &yaml.Node{
Kind: yaml.ScalarNode,
Value: "null",
}, nil
}
switch val := item.(type) {
case JSONMapSlice:
var n yaml.Node
@ -265,7 +288,14 @@ func json2yaml(item interface{}) (*yaml.Node, error) {
case map[string]interface{}:
var n yaml.Node
n.Kind = yaml.MappingNode
for k, v := range val {
keys := make([]string, 0, len(val))
for k := range val {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
v := val[k]
childNode, err := json2yaml(v)
if err != nil {
return nil, err
@ -318,9 +348,9 @@ func json2yaml(item interface{}) (*yaml.Node, error) {
Tag: yamlBoolScalar,
Value: strconv.FormatBool(val),
}, nil
default:
return nil, fmt.Errorf("unhandled type: %T", val)
}
return nil, nil //nolint:nilnil
}
// JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice

vendor/github.com/go-openapi/validate/BENCHMARK.md generated vendored Normal file

@ -0,0 +1,31 @@
# Benchmark
Validating the Kubernetes Swagger API
## v0.22.6: 60,000,000 allocs
```
goos: linux
goarch: amd64
pkg: github.com/go-openapi/validate
cpu: AMD Ryzen 7 5800X 8-Core Processor
Benchmark_KubernetesSpec/validating_kubernetes_API-16 1 8549863982 ns/op 7067424936 B/op 59583275 allocs/op
```
## After refactoring PR: minor but noticeable improvements: 25,000,000 allocs
```
go test -bench Spec
goos: linux
goarch: amd64
pkg: github.com/go-openapi/validate
cpu: AMD Ryzen 7 5800X 8-Core Processor
Benchmark_KubernetesSpec/validating_kubernetes_API-16 1 4064535557 ns/op 3379715592 B/op 25320330 allocs/op
```
## After reduce GC pressure PR: 17,000,000 allocs
```
goos: linux
goarch: amd64
pkg: github.com/go-openapi/validate
cpu: AMD Ryzen 7 5800X 8-Core Processor
Benchmark_KubernetesSpec/validating_kubernetes_API-16 1 3758414145 ns/op 2593881496 B/op 17111373 allocs/op
```


@ -25,48 +25,55 @@ import (
// According to the Swagger spec, default values MUST validate against their schema.
type defaultValidator struct {
SpecValidator *SpecValidator
visitedSchemas map[string]bool
visitedSchemas map[string]struct{}
schemaOptions *SchemaValidatorOptions
}
// resetVisited resets the internal state of visited schemas
func (d *defaultValidator) resetVisited() {
d.visitedSchemas = map[string]bool{}
if d.visitedSchemas == nil {
d.visitedSchemas = make(map[string]struct{})
return
}
// TODO(go1.21): clear(d.visitedSchemas)
for k := range d.visitedSchemas {
delete(d.visitedSchemas, k)
}
}
func isVisited(path string, visitedSchemas map[string]bool) bool {
found := visitedSchemas[path]
if !found {
// search for overlapping paths
frags := strings.Split(path, ".")
if len(frags) < 2 {
// shortcut exit on smaller paths
return found
}
last := len(frags) - 1
var currentFragStr, parent string
for i := range frags {
if i == 0 {
currentFragStr = frags[last]
} else {
currentFragStr = strings.Join([]string{frags[last-i], currentFragStr}, ".")
}
if i < last {
parent = strings.Join(frags[0:last-i], ".")
} else {
parent = ""
}
if strings.HasSuffix(parent, currentFragStr) {
found = true
break
}
}
}
return found
}
func isVisited(path string, visitedSchemas map[string]struct{}) bool {
_, found := visitedSchemas[path]
if found {
return true
}
// search for overlapping paths
var (
parent string
suffix string
)
for i := len(path) - 2; i >= 0; i-- {
r := path[i]
if r != '.' {
continue
}
parent = path[0:i]
suffix = path[i+1:]
if strings.HasSuffix(parent, suffix) {
return true
}
}
return false
}
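// illustrative trace of the overlap check: for path "definitions.a.b.a.b",
// the scan finds parent "definitions.a.b" and suffix "a.b"; the parent ends
// with the suffix, so the path is reported as visited (a recursion cycle).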
// beingVisited asserts a schema is being visited
func (d *defaultValidator) beingVisited(path string) {
d.visitedSchemas[path] = true
d.visitedSchemas[path] = struct{}{}
}
// isVisited tells if a path has already been visited
@ -75,8 +82,9 @@ func (d *defaultValidator) isVisited(path string) bool {
}
// Validate validates the default values declared in the swagger spec
func (d *defaultValidator) Validate() (errs *Result) {
errs = new(Result)
func (d *defaultValidator) Validate() *Result {
errs := pools.poolOfResults.BorrowResult() // will redeem when merged
if d == nil || d.SpecValidator == nil {
return errs
}
@ -89,7 +97,7 @@ func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result {
// every default value that is specified must validate against the schema for that property
// headers, items, parameters, schema
res := new(Result)
res := pools.poolOfResults.BorrowResult() // will redeem when merged
s := d.SpecValidator
for method, pathItem := range s.expandedAnalyzer().Operations() {
@ -107,10 +115,12 @@ func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result {
// default values provided must validate against their inline definition (no explicit schema)
if param.Default != nil && param.Schema == nil {
// check param default value is valid
red := NewParamValidator(&param, s.KnownFormats).Validate(param.Default) //#nosec
red := newParamValidator(&param, s.KnownFormats, d.schemaOptions).Validate(param.Default) //#nosec
if red.HasErrorsOrWarnings() {
res.AddErrors(defaultValueDoesNotValidateMsg(param.Name, param.In))
res.Merge(red)
} else if red.wantsRedeemOnMerge {
pools.poolOfResults.RedeemResult(red)
}
}
@ -120,6 +130,8 @@ func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result {
if red.HasErrorsOrWarnings() {
res.AddErrors(defaultValueItemsDoesNotValidateMsg(param.Name, param.In))
res.Merge(red)
} else if red.wantsRedeemOnMerge {
pools.poolOfResults.RedeemResult(red)
}
}
@ -129,6 +141,8 @@ func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result {
if red.HasErrorsOrWarnings() {
res.AddErrors(defaultValueDoesNotValidateMsg(param.Name, param.In))
res.Merge(red)
} else if red.wantsRedeemOnMerge {
pools.poolOfResults.RedeemResult(red)
}
}
}
@ -154,7 +168,7 @@ func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result {
// reset explored schemas to get depth-first recursive-proof exploration
d.resetVisited()
for nm, sch := range s.spec.Spec().Definitions {
res.Merge(d.validateDefaultValueSchemaAgainstSchema(fmt.Sprintf("definitions.%s", nm), "body", &sch)) //#nosec
res.Merge(d.validateDefaultValueSchemaAgainstSchema("definitions."+nm, "body", &sch)) //#nosec
}
}
return res
@ -176,10 +190,12 @@ func (d *defaultValidator) validateDefaultInResponse(resp *spec.Response, respon
d.resetVisited()
if h.Default != nil {
red := NewHeaderValidator(nm, &h, s.KnownFormats).Validate(h.Default) //#nosec
red := newHeaderValidator(nm, &h, s.KnownFormats, d.schemaOptions).Validate(h.Default) //#nosec
if red.HasErrorsOrWarnings() {
res.AddErrors(defaultValueHeaderDoesNotValidateMsg(operationID, nm, responseName))
res.Merge(red)
} else if red.wantsRedeemOnMerge {
pools.poolOfResults.RedeemResult(red)
}
}
@ -189,6 +205,8 @@ func (d *defaultValidator) validateDefaultInResponse(resp *spec.Response, respon
if red.HasErrorsOrWarnings() {
res.AddErrors(defaultValueHeaderItemsDoesNotValidateMsg(operationID, nm, responseName))
res.Merge(red)
} else if red.wantsRedeemOnMerge {
pools.poolOfResults.RedeemResult(red)
}
}
@ -208,6 +226,8 @@ func (d *defaultValidator) validateDefaultInResponse(resp *spec.Response, respon
// Additional message to make sure the context of the error is not lost
res.AddErrors(defaultValueInDoesNotValidateMsg(operationID, responseName))
res.Merge(red)
} else if red.wantsRedeemOnMerge {
pools.poolOfResults.RedeemResult(red)
}
}
return res
@ -219,11 +239,13 @@ func (d *defaultValidator) validateDefaultValueSchemaAgainstSchema(path, in stri
return nil
}
d.beingVisited(path)
res := new(Result)
res := pools.poolOfResults.BorrowResult()
s := d.SpecValidator
if schema.Default != nil {
res.Merge(NewSchemaValidator(schema, s.spec.Spec(), path+".default", s.KnownFormats, SwaggerSchema(true)).Validate(schema.Default))
res.Merge(
newSchemaValidator(schema, s.spec.Spec(), path+".default", s.KnownFormats, d.schemaOptions).Validate(schema.Default),
)
}
if schema.Items != nil {
if schema.Items.Schema != nil {
@ -241,7 +263,7 @@ func (d *defaultValidator) validateDefaultValueSchemaAgainstSchema(path, in stri
}
if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil {
// NOTE: we keep validating values, even though additionalItems is not supported by Swagger 2.0 (and 3.0 as well)
res.Merge(d.validateDefaultValueSchemaAgainstSchema(fmt.Sprintf("%s.additionalItems", path), in, schema.AdditionalItems.Schema))
res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+".additionalItems", in, schema.AdditionalItems.Schema))
}
for propName, prop := range schema.Properties {
res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec
@ -250,7 +272,7 @@ func (d *defaultValidator) validateDefaultValueSchemaAgainstSchema(path, in stri
res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec
}
if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil {
res.Merge(d.validateDefaultValueSchemaAgainstSchema(fmt.Sprintf("%s.additionalProperties", path), in, schema.AdditionalProperties.Schema))
res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+".additionalProperties", in, schema.AdditionalProperties.Schema))
}
if schema.AllOf != nil {
for i, aoSch := range schema.AllOf {
@ -263,11 +285,13 @@ func (d *defaultValidator) validateDefaultValueSchemaAgainstSchema(path, in stri
// TODO: Temporary duplicated code. Need to refactor with examples
func (d *defaultValidator) validateDefaultValueItemsAgainstSchema(path, in string, root interface{}, items *spec.Items) *Result {
res := new(Result)
res := pools.poolOfResults.BorrowResult()
s := d.SpecValidator
if items != nil {
if items.Default != nil {
res.Merge(newItemsValidator(path, in, items, root, s.KnownFormats).Validate(0, items.Default))
res.Merge(
newItemsValidator(path, in, items, root, s.KnownFormats, d.schemaOptions).Validate(0, items.Default),
)
}
if items.Items != nil {
res.Merge(d.validateDefaultValueItemsAgainstSchema(path+"[0].default", in, root, items.Items))


@ -23,17 +23,27 @@ import (
// ExampleValidator validates example values defined in a spec
type exampleValidator struct {
SpecValidator *SpecValidator
visitedSchemas map[string]bool
visitedSchemas map[string]struct{}
schemaOptions *SchemaValidatorOptions
}
// resetVisited resets the internal state of visited schemas
func (ex *exampleValidator) resetVisited() {
ex.visitedSchemas = map[string]bool{}
if ex.visitedSchemas == nil {
ex.visitedSchemas = make(map[string]struct{})
return
}
// TODO(go1.21): clear(ex.visitedSchemas)
for k := range ex.visitedSchemas {
delete(ex.visitedSchemas, k)
}
}
// beingVisited asserts a schema is being visited
func (ex *exampleValidator) beingVisited(path string) {
ex.visitedSchemas[path] = true
ex.visitedSchemas[path] = struct{}{}
}
// isVisited tells if a path has already been visited
@ -48,8 +58,9 @@ func (ex *exampleValidator) isVisited(path string) bool {
// - schemas
// - individual property
// - responses
func (ex *exampleValidator) Validate() (errs *Result) {
errs = new(Result)
func (ex *exampleValidator) Validate() *Result {
errs := pools.poolOfResults.BorrowResult()
if ex == nil || ex.SpecValidator == nil {
return errs
}
@ -64,7 +75,7 @@ func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result {
// in: schemas, properties, object, items
// not in: headers, parameters without schema
res := new(Result)
res := pools.poolOfResults.BorrowResult()
s := ex.SpecValidator
for method, pathItem := range s.expandedAnalyzer().Operations() {
@ -82,10 +93,12 @@ func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result {
// default values provided must validate against their inline definition (no explicit schema)
if param.Example != nil && param.Schema == nil {
// check param default value is valid
red := NewParamValidator(&param, s.KnownFormats).Validate(param.Example) //#nosec
red := newParamValidator(&param, s.KnownFormats, ex.schemaOptions).Validate(param.Example) //#nosec
if red.HasErrorsOrWarnings() {
res.AddWarnings(exampleValueDoesNotValidateMsg(param.Name, param.In))
res.MergeAsWarnings(red)
} else if red.wantsRedeemOnMerge {
pools.poolOfResults.RedeemResult(red)
}
}
@ -95,6 +108,8 @@ func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result {
if red.HasErrorsOrWarnings() {
res.AddWarnings(exampleValueItemsDoesNotValidateMsg(param.Name, param.In))
res.Merge(red)
} else if red.wantsRedeemOnMerge {
pools.poolOfResults.RedeemResult(red)
}
}
@ -104,6 +119,8 @@ func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result {
if red.HasErrorsOrWarnings() {
res.AddWarnings(exampleValueDoesNotValidateMsg(param.Name, param.In))
res.Merge(red)
} else if red.wantsRedeemOnMerge {
pools.poolOfResults.RedeemResult(red)
}
}
}
@ -129,7 +146,7 @@ func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result {
// reset explored schemas to get depth-first recursive-proof exploration
ex.resetVisited()
for nm, sch := range s.spec.Spec().Definitions {
res.Merge(ex.validateExampleValueSchemaAgainstSchema(fmt.Sprintf("definitions.%s", nm), "body", &sch)) //#nosec
res.Merge(ex.validateExampleValueSchemaAgainstSchema("definitions."+nm, "body", &sch)) //#nosec
}
}
return res
@ -151,10 +168,12 @@ func (ex *exampleValidator) validateExampleInResponse(resp *spec.Response, respo
ex.resetVisited()
if h.Example != nil {
red := NewHeaderValidator(nm, &h, s.KnownFormats).Validate(h.Example) //#nosec
red := newHeaderValidator(nm, &h, s.KnownFormats, ex.schemaOptions).Validate(h.Example) //#nosec
if red.HasErrorsOrWarnings() {
res.AddWarnings(exampleValueHeaderDoesNotValidateMsg(operationID, nm, responseName))
res.MergeAsWarnings(red)
} else if red.wantsRedeemOnMerge {
pools.poolOfResults.RedeemResult(red)
}
}
@ -164,6 +183,8 @@ func (ex *exampleValidator) validateExampleInResponse(resp *spec.Response, respo
if red.HasErrorsOrWarnings() {
res.AddWarnings(exampleValueHeaderItemsDoesNotValidateMsg(operationID, nm, responseName))
res.MergeAsWarnings(red)
} else if red.wantsRedeemOnMerge {
pools.poolOfResults.RedeemResult(red)
}
}
@ -183,13 +204,17 @@ func (ex *exampleValidator) validateExampleInResponse(resp *spec.Response, respo
// Additional message to make sure the context of the error is not lost
res.AddWarnings(exampleValueInDoesNotValidateMsg(operationID, responseName))
res.Merge(red)
} else if red.wantsRedeemOnMerge {
pools.poolOfResults.RedeemResult(red)
}
}
if response.Examples != nil {
if response.Schema != nil {
if example, ok := response.Examples["application/json"]; ok {
res.MergeAsWarnings(NewSchemaValidator(response.Schema, s.spec.Spec(), path+".examples", s.KnownFormats, SwaggerSchema(true)).Validate(example))
res.MergeAsWarnings(
newSchemaValidator(response.Schema, s.spec.Spec(), path+".examples", s.KnownFormats, s.schemaOptions).Validate(example),
)
} else {
// TODO: validate other media types too
res.AddWarnings(examplesMimeNotSupportedMsg(operationID, responseName))
@ -208,10 +233,12 @@ func (ex *exampleValidator) validateExampleValueSchemaAgainstSchema(path, in str
}
ex.beingVisited(path)
s := ex.SpecValidator
res := new(Result)
res := pools.poolOfResults.BorrowResult()
if schema.Example != nil {
res.MergeAsWarnings(NewSchemaValidator(schema, s.spec.Spec(), path+".example", s.KnownFormats, SwaggerSchema(true)).Validate(schema.Example))
res.MergeAsWarnings(
newSchemaValidator(schema, s.spec.Spec(), path+".example", s.KnownFormats, ex.schemaOptions).Validate(schema.Example),
)
}
if schema.Items != nil {
if schema.Items.Schema != nil {
@ -229,7 +256,7 @@ func (ex *exampleValidator) validateExampleValueSchemaAgainstSchema(path, in str
}
if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil {
// NOTE: we keep validating values, even though additionalItems is unsupported in Swagger 2.0 (and 3.0 as well)
res.Merge(ex.validateExampleValueSchemaAgainstSchema(fmt.Sprintf("%s.additionalItems", path), in, schema.AdditionalItems.Schema))
res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+".additionalItems", in, schema.AdditionalItems.Schema))
}
for propName, prop := range schema.Properties {
res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec
@ -238,7 +265,7 @@ func (ex *exampleValidator) validateExampleValueSchemaAgainstSchema(path, in str
res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec
}
if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil {
res.Merge(ex.validateExampleValueSchemaAgainstSchema(fmt.Sprintf("%s.additionalProperties", path), in, schema.AdditionalProperties.Schema))
res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+".additionalProperties", in, schema.AdditionalProperties.Schema))
}
if schema.AllOf != nil {
for i, aoSch := range schema.AllOf {
@ -252,11 +279,13 @@ func (ex *exampleValidator) validateExampleValueSchemaAgainstSchema(path, in str
//
func (ex *exampleValidator) validateExampleValueItemsAgainstSchema(path, in string, root interface{}, items *spec.Items) *Result {
res := new(Result)
res := pools.poolOfResults.BorrowResult()
s := ex.SpecValidator
if items != nil {
if items.Example != nil {
res.MergeAsWarnings(newItemsValidator(path, in, items, root, s.KnownFormats).Validate(0, items.Example))
res.MergeAsWarnings(
newItemsValidator(path, in, items, root, s.KnownFormats, ex.schemaOptions).Validate(0, items.Example),
)
}
if items.Items != nil {
res.Merge(ex.validateExampleValueItemsAgainstSchema(path+"[0].example", in, root, items.Items))
@ -265,5 +294,6 @@ func (ex *exampleValidator) validateExampleValueItemsAgainstSchema(path, in stri
res.AddErrors(invalidPatternInMsg(path, in, items.Pattern))
}
}
return res
}


@ -22,10 +22,32 @@ import (
)
type formatValidator struct {
Format string
Path string
In string
Format string
KnownFormats strfmt.Registry
Options *SchemaValidatorOptions
}
func newFormatValidator(path, in, format string, formats strfmt.Registry, opts *SchemaValidatorOptions) *formatValidator {
if opts == nil {
opts = new(SchemaValidatorOptions)
}
var f *formatValidator
if opts.recycleValidators {
f = pools.poolOfFormatValidators.BorrowValidator()
} else {
f = new(formatValidator)
}
f.Path = path
f.In = in
f.Format = format
f.KnownFormats = formats
f.Options = opts
return f
}
func (f *formatValidator) SetPath(path string) {
@ -33,10 +55,10 @@ func (f *formatValidator) SetPath(path string) {
}
func (f *formatValidator) Applies(source interface{}, kind reflect.Kind) bool {
doit := func() bool {
if source == nil {
if source == nil || f.KnownFormats == nil {
return false
}
switch source := source.(type) {
case *spec.Items:
return kind == reflect.String && f.KnownFormats.ContainsName(source.Format)
@ -46,24 +68,32 @@ func (f *formatValidator) Applies(source interface{}, kind reflect.Kind) bool {
return kind == reflect.String && f.KnownFormats.ContainsName(source.Format)
case *spec.Header:
return kind == reflect.String && f.KnownFormats.ContainsName(source.Format)
}
default:
return false
}
r := doit()
debugLog("format validator for %q applies %t for %T (kind: %v)\n", f.Path, r, source, kind)
return r
}
func (f *formatValidator) Validate(val interface{}) *Result {
result := new(Result)
debugLog("validating \"%v\" against format: %s", val, f.Format)
if f.Options.recycleValidators {
defer func() {
f.redeem()
}()
}
var result *Result
if f.Options.recycleResult {
result = pools.poolOfResults.BorrowResult()
} else {
result = new(Result)
}
if err := FormatOf(f.Path, f.In, f.Format, val.(string), f.KnownFormats); err != nil {
result.AddErrors(err)
}
if result.HasErrors() {
return result
}
return nil
func (f *formatValidator) redeem() {
pools.poolOfFormatValidators.RedeemValidator(f)
}


@ -101,9 +101,17 @@ type errorHelper struct {
// A collection of unexported helpers for error construction
}
func (h *errorHelper) sErr(err errors.Error) *Result {
func (h *errorHelper) sErr(err errors.Error, recycle bool) *Result {
// Builds a Result from standard errors.Error
return &Result{Errors: []error{err}}
var result *Result
if recycle {
result = pools.poolOfResults.BorrowResult()
} else {
result = new(Result)
}
result.Errors = []error{err}
return result
}
func (h *errorHelper) addPointerError(res *Result, err error, ref string, fromPath string) *Result {
@ -225,7 +233,7 @@ func (h *paramHelper) safeExpandedParamsFor(path, method, operationID string, re
operation.Parameters = resolvedParams
for _, ppr := range s.expandedAnalyzer().SafeParamsFor(method, path,
func(p spec.Parameter, err error) bool {
func(_ spec.Parameter, err error) bool {
// since params have already been expanded, there are few causes for error
res.AddErrors(someParametersBrokenMsg(path, method, operationID))
// original error from analyzer
@ -306,6 +314,7 @@ func (r *responseHelper) expandResponseRef(
errorHelp.addPointerError(res, err, response.Ref.String(), path)
return nil, res
}
return response, res
}


@ -15,8 +15,8 @@
package validate
import (
"fmt"
"reflect"
"regexp"
"strings"
"github.com/go-openapi/errors"
@ -35,62 +35,116 @@ type objectValidator struct {
PatternProperties map[string]spec.Schema
Root interface{}
KnownFormats strfmt.Registry
Options SchemaValidatorOptions
Options *SchemaValidatorOptions
splitPath []string
}
func newObjectValidator(path, in string,
maxProperties, minProperties *int64, required []string, properties spec.SchemaProperties,
additionalProperties *spec.SchemaOrBool, patternProperties spec.SchemaProperties,
root interface{}, formats strfmt.Registry, opts *SchemaValidatorOptions) *objectValidator {
if opts == nil {
opts = new(SchemaValidatorOptions)
}
var v *objectValidator
if opts.recycleValidators {
v = pools.poolOfObjectValidators.BorrowValidator()
} else {
v = new(objectValidator)
}
v.Path = path
v.In = in
v.MaxProperties = maxProperties
v.MinProperties = minProperties
v.Required = required
v.Properties = properties
v.AdditionalProperties = additionalProperties
v.PatternProperties = patternProperties
v.Root = root
v.KnownFormats = formats
v.Options = opts
v.splitPath = strings.Split(v.Path, ".")
return v
}
func (o *objectValidator) SetPath(path string) {
o.Path = path
o.splitPath = strings.Split(path, ".")
}
func (o *objectValidator) Applies(source interface{}, kind reflect.Kind) bool {
// TODO: this should also work for structs
// there is a problem in the type validator where it will be unhappy about null values
// so that requires more testing
r := reflect.TypeOf(source) == specSchemaType && (kind == reflect.Map || kind == reflect.Struct)
debugLog("object validator for %q applies %t for %T (kind: %v)\n", o.Path, r, source, kind)
return r
_, isSchema := source.(*spec.Schema)
return isSchema && (kind == reflect.Map || kind == reflect.Struct)
}
func (o *objectValidator) isProperties() bool {
p := strings.Split(o.Path, ".")
p := o.splitPath
return len(p) > 1 && p[len(p)-1] == jsonProperties && p[len(p)-2] != jsonProperties
}
func (o *objectValidator) isDefault() bool {
p := strings.Split(o.Path, ".")
p := o.splitPath
return len(p) > 1 && p[len(p)-1] == jsonDefault && p[len(p)-2] != jsonDefault
}
func (o *objectValidator) isExample() bool {
p := strings.Split(o.Path, ".")
p := o.splitPath
return len(p) > 1 && (p[len(p)-1] == swaggerExample || p[len(p)-1] == swaggerExamples) && p[len(p)-2] != swaggerExample
}
func (o *objectValidator) checkArrayMustHaveItems(res *Result, val map[string]interface{}) {
// for swagger 2.0 schemas, there is an additional constraint to have array items defined explicitly.
// with pure jsonschema draft 4, one may have arrays with undefined items (i.e. any type).
if t, typeFound := val[jsonType]; typeFound {
if tpe, ok := t.(string); ok && tpe == arrayType {
if item, itemsKeyFound := val[jsonItems]; !itemsKeyFound {
res.AddErrors(errors.Required(jsonItems, o.Path, item))
}
}
}
if val == nil {
return
}
t, typeFound := val[jsonType]
if !typeFound {
return
}
tpe, isString := t.(string)
if !isString || tpe != arrayType {
return
}
item, itemsKeyFound := val[jsonItems]
if itemsKeyFound {
return
}
res.AddErrors(errors.Required(jsonItems, o.Path, item))
}
func (o *objectValidator) checkItemsMustBeTypeArray(res *Result, val map[string]interface{}) {
if !o.isProperties() && !o.isDefault() && !o.isExample() {
if _, itemsKeyFound := val[jsonItems]; itemsKeyFound {
t, typeFound := val[jsonType]
if typeFound {
if tpe, ok := t.(string); !ok || tpe != arrayType {
res.AddErrors(errors.InvalidType(o.Path, o.In, arrayType, nil))
}
} else {
// there is no type
res.AddErrors(errors.Required(jsonType, o.Path, t))
}
}
}
if val == nil {
return
}
if o.isProperties() || o.isDefault() || o.isExample() {
return
}
_, itemsKeyFound := val[jsonItems]
if !itemsKeyFound {
return
}
t, typeFound := val[jsonType]
if !typeFound {
// there is no type
res.AddErrors(errors.Required(jsonType, o.Path, t))
}
if tpe, isString := t.(string); !isString || tpe != arrayType {
res.AddErrors(errors.InvalidType(o.Path, o.In, arrayType, nil))
}
@ -104,37 +158,96 @@ func (o *objectValidator) precheck(res *Result, val map[string]interface{}) {
}
func (o *objectValidator) Validate(data interface{}) *Result {
val := data.(map[string]interface{})
// TODO: guard against nil data
if o.Options.recycleValidators {
defer func() {
o.redeem()
}()
}
var val map[string]interface{}
if data != nil {
var ok bool
val, ok = data.(map[string]interface{})
if !ok {
return errorHelp.sErr(invalidObjectMsg(o.Path, o.In), o.Options.recycleResult)
}
}
numKeys := int64(len(val))
if o.MinProperties != nil && numKeys < *o.MinProperties {
return errorHelp.sErr(errors.TooFewProperties(o.Path, o.In, *o.MinProperties))
return errorHelp.sErr(errors.TooFewProperties(o.Path, o.In, *o.MinProperties), o.Options.recycleResult)
}
if o.MaxProperties != nil && numKeys > *o.MaxProperties {
return errorHelp.sErr(errors.TooManyProperties(o.Path, o.In, *o.MaxProperties))
return errorHelp.sErr(errors.TooManyProperties(o.Path, o.In, *o.MaxProperties), o.Options.recycleResult)
}
res := new(Result)
var res *Result
if o.Options.recycleResult {
res = pools.poolOfResults.BorrowResult()
} else {
res = new(Result)
}
o.precheck(res, val)
// check validity of field names
if o.AdditionalProperties != nil && !o.AdditionalProperties.Allows {
// Case: additionalProperties: false
for k := range val {
_, regularProperty := o.Properties[k]
matched := false
o.validateNoAdditionalProperties(val, res)
} else {
// Cases: empty additionalProperties (implying: true), or additionalProperties: true, or additionalProperties: { <<schema>> }
o.validateAdditionalProperties(val, res)
}
o.validatePropertiesSchema(val, res)
// Check patternProperties
// TODO: it looks like we have done that twice in many cases
for key, value := range val {
_, regularProperty := o.Properties[key]
matched, _, patterns := o.validatePatternProperty(key, value, res) // applies to regular properties as well
if regularProperty || !matched {
continue
}
for _, pName := range patterns {
if v, ok := o.PatternProperties[pName]; ok {
r := newSchemaValidator(&v, o.Root, o.Path+"."+key, o.KnownFormats, o.Options).Validate(value)
res.mergeForField(data.(map[string]interface{}), key, r)
}
}
}
return res
}
func (o *objectValidator) validateNoAdditionalProperties(val map[string]interface{}, res *Result) {
for k := range val {
if k == "$schema" || k == "id" {
// special properties "$schema" and "id" are ignored
continue
}
_, regularProperty := o.Properties[k]
if regularProperty {
continue
}
matched := false
for pk := range o.PatternProperties {
if matches, _ := regexp.MatchString(pk, k); matches {
re, err := compileRegexp(pk)
if err != nil {
continue
}
if matches := re.MatchString(k); matches {
matched = true
break
}
}
if matched {
continue
}
if !regularProperty && k != "$schema" && k != "id" && !matched {
// Special properties "$schema" and "id" are ignored
res.AddErrors(errors.PropertyNotAllowed(o.Path, o.In, k))
// BUG(fredbi): This section should move to a part dedicated to spec validation as
@ -148,23 +261,38 @@ func (o *objectValidator) Validate(data interface{}) *Result {
// NOTE: prefix your messages here by "IMPORTANT!" so there are not filtered
// by higher level callers (the IMPORTANT! tag will be eventually
// removed).
if k == "headers" && val[k] != nil {
if k != "headers" || val[k] == nil {
continue
}
// $ref is forbidden in header
if headers, mapOk := val[k].(map[string]interface{}); mapOk {
headers, mapOk := val[k].(map[string]interface{})
if !mapOk {
continue
}
for headerKey, headerBody := range headers {
if headerBody != nil {
if headerSchema, mapOfMapOk := headerBody.(map[string]interface{}); mapOfMapOk {
if _, found := headerSchema["$ref"]; found {
var msg string
if refString, stringOk := headerSchema["$ref"].(string); stringOk {
msg = strings.Join([]string{", one may not use $ref=\":", refString, "\""}, "")
if headerBody == nil {
continue
}
headerSchema, mapOfMapOk := headerBody.(map[string]interface{})
if !mapOfMapOk {
continue
}
_, found := headerSchema["$ref"]
if !found {
continue
}
refString, stringOk := headerSchema["$ref"].(string)
if !stringOk {
continue
}
msg := strings.Join([]string{", one may not use $ref=\":", refString, "\""}, "")
res.AddErrors(refNotAllowedInHeaderMsg(o.Path, headerKey, msg))
}
}
}
}
}
/*
case "$ref":
if val[k] != nil {
@ -174,106 +302,130 @@ func (o *objectValidator) Validate(data interface{}) *Result {
}
}
}
} else {
// Cases: no additionalProperties (implying: true), or additionalProperties: true, or additionalProperties: { <<schema>> }
func (o *objectValidator) validateAdditionalProperties(val map[string]interface{}, res *Result) {
for key, value := range val {
_, regularProperty := o.Properties[key]
if regularProperty {
continue
}
// Validates property against "patternProperties" if applicable
// BUG(fredbi): succeededOnce is always false
// NOTE: how about regular properties which do not match patternProperties?
matched, succeededOnce, _ := o.validatePatternProperty(key, value, res)
if matched || succeededOnce {
continue
}
if !(regularProperty || matched || succeededOnce) {
if o.AdditionalProperties == nil || o.AdditionalProperties.Schema == nil {
continue
}
// Cases: properties which are not regular properties and have not been matched by the PatternProperties validator
if o.AdditionalProperties != nil && o.AdditionalProperties.Schema != nil {
// AdditionalProperties as Schema
r := NewSchemaValidator(o.AdditionalProperties.Schema, o.Root, o.Path+"."+key, o.KnownFormats, o.Options.Options()...).Validate(value)
res.mergeForField(data.(map[string]interface{}), key, r)
} else if regularProperty && !(matched || succeededOnce) {
// TODO: this is dead code since regularProperty=false here
res.AddErrors(errors.FailedAllPatternProperties(o.Path, o.In, key))
}
}
r := newSchemaValidator(o.AdditionalProperties.Schema, o.Root, o.Path+"."+key, o.KnownFormats, o.Options).Validate(value)
res.mergeForField(val, key, r)
}
// Valid cases: additionalProperties: true or undefined
}
createdFromDefaults := map[string]bool{}
func (o *objectValidator) validatePropertiesSchema(val map[string]interface{}, res *Result) {
createdFromDefaults := map[string]struct{}{}
// Property types:
// - regular Property
pSchema := pools.poolOfSchemas.BorrowSchema() // recycle a spec.Schema object whose lifespan extends only to the validation of properties
defer func() {
pools.poolOfSchemas.RedeemSchema(pSchema)
}()
for pName := range o.Properties {
pSchema := o.Properties[pName] // one instance per iteration
rName := pName
if o.Path != "" {
*pSchema = o.Properties[pName]
var rName string
if o.Path == "" {
rName = pName
} else {
rName = o.Path + "." + pName
}
// Recursively validates each property against its schema
if v, ok := val[pName]; ok {
r := NewSchemaValidator(&pSchema, o.Root, rName, o.KnownFormats, o.Options.Options()...).Validate(v)
res.mergeForField(data.(map[string]interface{}), pName, r)
} else if pSchema.Default != nil {
// If a default value is defined, creates the property from defaults
// NOTE: JSON schema does not enforce default values to be valid against schema. Swagger does.
createdFromDefaults[pName] = true
res.addPropertySchemata(data.(map[string]interface{}), pName, &pSchema)
v, ok := val[pName]
if ok {
r := newSchemaValidator(pSchema, o.Root, rName, o.KnownFormats, o.Options).Validate(v)
res.mergeForField(val, pName, r)
continue
}
if pSchema.Default != nil {
// if a default value is defined, creates the property from defaults
// NOTE: JSON schema does not enforce default values to be valid against schema. Swagger does.
createdFromDefaults[pName] = struct{}{}
if !o.Options.skipSchemataResult {
res.addPropertySchemata(val, pName, pSchema) // this shallow-clones the content of the pSchema pointer
}
}
}
if len(o.Required) == 0 {
return
}
// Check required properties
if len(o.Required) > 0 {
for _, k := range o.Required {
if v, ok := val[k]; !ok && !createdFromDefaults[k] {
res.AddErrors(errors.Required(o.Path+"."+k, o.In, v))
v, ok := val[k]
if ok {
continue
}
}
_, isCreatedFromDefaults := createdFromDefaults[k]
if isCreatedFromDefaults {
continue
}
// Check patternProperties
// TODO: it looks like we have done that twice in many cases
for key, value := range val {
_, regularProperty := o.Properties[key]
matched, _ /*succeededOnce*/, patterns := o.validatePatternProperty(key, value, res)
if !regularProperty && (matched /*|| succeededOnce*/) {
for _, pName := range patterns {
if v, ok := o.PatternProperties[pName]; ok {
r := NewSchemaValidator(&v, o.Root, o.Path+"."+key, o.KnownFormats, o.Options.Options()...).Validate(value)
res.mergeForField(data.(map[string]interface{}), key, r)
res.AddErrors(errors.Required(fmt.Sprintf("%s.%s", o.Path, k), o.In, v))
}
}
}
}
return res
}
// TODO: succeededOnce is not used anywhere
func (o *objectValidator) validatePatternProperty(key string, value interface{}, result *Result) (bool, bool, []string) {
if len(o.PatternProperties) == 0 {
return false, false, nil
}
matched := false
succeededOnce := false
var patterns []string
patterns := make([]string, 0, len(o.PatternProperties))
for k, schema := range o.PatternProperties {
sch := schema
if match, _ := regexp.MatchString(k, key); match {
schema := pools.poolOfSchemas.BorrowSchema()
defer func() {
pools.poolOfSchemas.RedeemSchema(schema)
}()
for k := range o.PatternProperties {
re, err := compileRegexp(k)
if err != nil {
continue
}
match := re.MatchString(key)
if !match {
continue
}
*schema = o.PatternProperties[k]
patterns = append(patterns, k)
matched = true
validator := NewSchemaValidator(&sch, o.Root, o.Path+"."+key, o.KnownFormats, o.Options.Options()...)
validator := newSchemaValidator(schema, o.Root, fmt.Sprintf("%s.%s", o.Path, key), o.KnownFormats, o.Options)
res := validator.Validate(value)
result.Merge(res)
}
}
// BUG(fredbi): can't get to here. Should remove dead code (commented out).
// if succeededOnce {
// result.Inc()
// }
return matched, succeededOnce, patterns
}
func (o *objectValidator) redeem() {
pools.poolOfObjectValidators.RedeemValidator(o)
}

vendor/github.com/go-openapi/validate/options.go

@ -31,6 +31,7 @@ type Opts struct {
// GET:/v1/{shelve} and GET:/v1/{book}, where the IDs are "shelve/*" and
// /"shelve/*/book/*" respectively.
StrictPathParamUniqueness bool
SkipSchemataResult bool
}
var (

vendor/github.com/go-openapi/validate/pools.go generated vendored Normal file (366 lines)

@ -0,0 +1,366 @@
//go:build !validatedebug
package validate
import (
"sync"
"github.com/go-openapi/spec"
)
var pools allPools
func init() {
resetPools()
}
func resetPools() {
// NOTE: for testing purposes, we might want to reset pools after calling Validate twice.
// The pool is corrupted in that case: calling Put twice inserts a duplicate in the pool
// and further calls to Get are mishandled.
pools = allPools{
poolOfSchemaValidators: schemaValidatorsPool{
Pool: &sync.Pool{
New: func() any {
s := &SchemaValidator{}
return s
},
},
},
poolOfObjectValidators: objectValidatorsPool{
Pool: &sync.Pool{
New: func() any {
s := &objectValidator{}
return s
},
},
},
poolOfSliceValidators: sliceValidatorsPool{
Pool: &sync.Pool{
New: func() any {
s := &schemaSliceValidator{}
return s
},
},
},
poolOfItemsValidators: itemsValidatorsPool{
Pool: &sync.Pool{
New: func() any {
s := &itemsValidator{}
return s
},
},
},
poolOfBasicCommonValidators: basicCommonValidatorsPool{
Pool: &sync.Pool{
New: func() any {
s := &basicCommonValidator{}
return s
},
},
},
poolOfHeaderValidators: headerValidatorsPool{
Pool: &sync.Pool{
New: func() any {
s := &HeaderValidator{}
return s
},
},
},
poolOfParamValidators: paramValidatorsPool{
Pool: &sync.Pool{
New: func() any {
s := &ParamValidator{}
return s
},
},
},
poolOfBasicSliceValidators: basicSliceValidatorsPool{
Pool: &sync.Pool{
New: func() any {
s := &basicSliceValidator{}
return s
},
},
},
poolOfNumberValidators: numberValidatorsPool{
Pool: &sync.Pool{
New: func() any {
s := &numberValidator{}
return s
},
},
},
poolOfStringValidators: stringValidatorsPool{
Pool: &sync.Pool{
New: func() any {
s := &stringValidator{}
return s
},
},
},
poolOfSchemaPropsValidators: schemaPropsValidatorsPool{
Pool: &sync.Pool{
New: func() any {
s := &schemaPropsValidator{}
return s
},
},
},
poolOfFormatValidators: formatValidatorsPool{
Pool: &sync.Pool{
New: func() any {
s := &formatValidator{}
return s
},
},
},
poolOfTypeValidators: typeValidatorsPool{
Pool: &sync.Pool{
New: func() any {
s := &typeValidator{}
return s
},
},
},
poolOfSchemas: schemasPool{
Pool: &sync.Pool{
New: func() any {
s := &spec.Schema{}
return s
},
},
},
poolOfResults: resultsPool{
Pool: &sync.Pool{
New: func() any {
s := &Result{}
return s
},
},
},
}
}
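
The NOTE in resetPools describes a generic sync.Pool hazard. A standalone toy (not code from this diff) showing why a double Put corrupts a pool: two later borrowers can alias the same pointer.

package main

import (
	"fmt"
	"sync"
)

func main() {
	pool := &sync.Pool{New: func() any { return new(int) }}

	x := pool.Get().(*int)
	pool.Put(x)
	pool.Put(x) // double redeem: the pool may now hand x out twice

	a := pool.Get().(*int)
	b := pool.Get().(*int)
	fmt.Println(a == b) // can print true: two "distinct" borrows alias each other
}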
type (
allPools struct {
// memory pools for all validator objects.
//
// Each pool can be borrowed from and redeemed to.
poolOfSchemaValidators schemaValidatorsPool
poolOfObjectValidators objectValidatorsPool
poolOfSliceValidators sliceValidatorsPool
poolOfItemsValidators itemsValidatorsPool
poolOfBasicCommonValidators basicCommonValidatorsPool
poolOfHeaderValidators headerValidatorsPool
poolOfParamValidators paramValidatorsPool
poolOfBasicSliceValidators basicSliceValidatorsPool
poolOfNumberValidators numberValidatorsPool
poolOfStringValidators stringValidatorsPool
poolOfSchemaPropsValidators schemaPropsValidatorsPool
poolOfFormatValidators formatValidatorsPool
poolOfTypeValidators typeValidatorsPool
poolOfSchemas schemasPool
poolOfResults resultsPool
}
schemaValidatorsPool struct {
*sync.Pool
}
objectValidatorsPool struct {
*sync.Pool
}
sliceValidatorsPool struct {
*sync.Pool
}
itemsValidatorsPool struct {
*sync.Pool
}
basicCommonValidatorsPool struct {
*sync.Pool
}
headerValidatorsPool struct {
*sync.Pool
}
paramValidatorsPool struct {
*sync.Pool
}
basicSliceValidatorsPool struct {
*sync.Pool
}
numberValidatorsPool struct {
*sync.Pool
}
stringValidatorsPool struct {
*sync.Pool
}
schemaPropsValidatorsPool struct {
*sync.Pool
}
formatValidatorsPool struct {
*sync.Pool
}
typeValidatorsPool struct {
*sync.Pool
}
schemasPool struct {
*sync.Pool
}
resultsPool struct {
*sync.Pool
}
)
func (p schemaValidatorsPool) BorrowValidator() *SchemaValidator {
return p.Get().(*SchemaValidator)
}
func (p schemaValidatorsPool) RedeemValidator(s *SchemaValidator) {
// NOTE: s might be nil. In that case, Put is a noop.
p.Put(s)
}
func (p objectValidatorsPool) BorrowValidator() *objectValidator {
return p.Get().(*objectValidator)
}
func (p objectValidatorsPool) RedeemValidator(s *objectValidator) {
p.Put(s)
}
func (p sliceValidatorsPool) BorrowValidator() *schemaSliceValidator {
return p.Get().(*schemaSliceValidator)
}
func (p sliceValidatorsPool) RedeemValidator(s *schemaSliceValidator) {
p.Put(s)
}
func (p itemsValidatorsPool) BorrowValidator() *itemsValidator {
return p.Get().(*itemsValidator)
}
func (p itemsValidatorsPool) RedeemValidator(s *itemsValidator) {
p.Put(s)
}
func (p basicCommonValidatorsPool) BorrowValidator() *basicCommonValidator {
return p.Get().(*basicCommonValidator)
}
func (p basicCommonValidatorsPool) RedeemValidator(s *basicCommonValidator) {
p.Put(s)
}
func (p headerValidatorsPool) BorrowValidator() *HeaderValidator {
return p.Get().(*HeaderValidator)
}
func (p headerValidatorsPool) RedeemValidator(s *HeaderValidator) {
p.Put(s)
}
func (p paramValidatorsPool) BorrowValidator() *ParamValidator {
return p.Get().(*ParamValidator)
}
func (p paramValidatorsPool) RedeemValidator(s *ParamValidator) {
p.Put(s)
}
func (p basicSliceValidatorsPool) BorrowValidator() *basicSliceValidator {
return p.Get().(*basicSliceValidator)
}
func (p basicSliceValidatorsPool) RedeemValidator(s *basicSliceValidator) {
p.Put(s)
}
func (p numberValidatorsPool) BorrowValidator() *numberValidator {
return p.Get().(*numberValidator)
}
func (p numberValidatorsPool) RedeemValidator(s *numberValidator) {
p.Put(s)
}
func (p stringValidatorsPool) BorrowValidator() *stringValidator {
return p.Get().(*stringValidator)
}
func (p stringValidatorsPool) RedeemValidator(s *stringValidator) {
p.Put(s)
}
func (p schemaPropsValidatorsPool) BorrowValidator() *schemaPropsValidator {
return p.Get().(*schemaPropsValidator)
}
func (p schemaPropsValidatorsPool) RedeemValidator(s *schemaPropsValidator) {
p.Put(s)
}
func (p formatValidatorsPool) BorrowValidator() *formatValidator {
return p.Get().(*formatValidator)
}
func (p formatValidatorsPool) RedeemValidator(s *formatValidator) {
p.Put(s)
}
func (p typeValidatorsPool) BorrowValidator() *typeValidator {
return p.Get().(*typeValidator)
}
func (p typeValidatorsPool) RedeemValidator(s *typeValidator) {
p.Put(s)
}
func (p schemasPool) BorrowSchema() *spec.Schema {
return p.Get().(*spec.Schema)
}
func (p schemasPool) RedeemSchema(s *spec.Schema) {
p.Put(s)
}
func (p resultsPool) BorrowResult() *Result {
return p.Get().(*Result).cleared()
}
func (p resultsPool) RedeemResult(s *Result) {
if s == emptyResult {
return
}
p.Put(s)
}
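
Together these methods define a strict borrow/redeem discipline. A hypothetical package-internal helper (assuming the pools variable above) sketching the intended life cycle:

// tallyErrors is a hypothetical helper: borrow once, redeem exactly once,
// and never touch the object after redeeming it.
func tallyErrors(others ...*Result) int {
	r := pools.poolOfResults.BorrowResult() // comes back reset via cleared()
	defer pools.poolOfResults.RedeemResult(r)

	r.MergeAsErrors(others...) // pool-backed operands are redeemed by the merge
	return len(r.Errors)
}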

vendor/github.com/go-openapi/validate/pools_debug.go generated vendored Normal file (1012 lines)
File diff suppressed because it is too large

vendor/github.com/go-openapi/validate/result.go

@ -15,7 +15,7 @@
package validate
import (
"fmt"
stderrors "errors"
"reflect"
"strings"
@ -23,6 +23,8 @@ import (
"github.com/go-openapi/spec"
)
var emptyResult = &Result{MatchCount: 1}
// Result represents a validation result set, composed of
// errors and warnings.
//
@ -50,8 +52,10 @@ type Result struct {
// Schemata for slice items
itemSchemata []itemSchemata
cachedFieldSchemta map[FieldKey][]*spec.Schema
cachedFieldSchemata map[FieldKey][]*spec.Schema
cachedItemSchemata map[ItemKey][]*spec.Schema
wantsRedeemOnMerge bool
}
// FieldKey is a pair of an object and a field, usable as a key for a map.
@ -116,6 +120,9 @@ func (r *Result) Merge(others ...*Result) *Result {
}
r.mergeWithoutRootSchemata(other)
r.rootObjectSchemata.Append(other.rootObjectSchemata)
if other.wantsRedeemOnMerge {
pools.poolOfResults.RedeemResult(other)
}
}
return r
}
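
The new wantsRedeemOnMerge flag gives Merge ownership-transfer semantics: a pool-backed operand is consumed by the merge. A sketch of the resulting contract for hypothetical internal callers:

func accumulate(acc *Result, v *SchemaValidator, data interface{}) *Result {
	other := v.Validate(data) // may be pool-backed when result recycling is enabled
	acc.Merge(other)          // contents absorbed; other is redeemed if pool-backed
	// other must not be read past this point: it may already be recycled
	return acc
}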
@ -133,8 +140,8 @@ func (r *Result) RootObjectSchemata() []*spec.Schema {
// FieldSchemata returns the schemata which apply to fields in objects.
func (r *Result) FieldSchemata() map[FieldKey][]*spec.Schema {
if r.cachedFieldSchemta != nil {
return r.cachedFieldSchemta
if r.cachedFieldSchemata != nil {
return r.cachedFieldSchemata
}
ret := make(map[FieldKey][]*spec.Schema, len(r.fieldSchemata))
@ -146,7 +153,8 @@ func (r *Result) FieldSchemata() map[FieldKey][]*spec.Schema {
ret[key] = append(ret[key], fs.schemata.multiple...)
}
}
r.cachedFieldSchemta = ret
r.cachedFieldSchemata = ret
return ret
}
@ -170,7 +178,7 @@ func (r *Result) ItemSchemata() map[ItemKey][]*spec.Schema {
}
func (r *Result) resetCaches() {
r.cachedFieldSchemta = nil
r.cachedFieldSchemata = nil
r.cachedItemSchemata = nil
}
@ -187,12 +195,16 @@ func (r *Result) mergeForField(obj map[string]interface{}, field string, other *
if r.fieldSchemata == nil {
r.fieldSchemata = make([]fieldSchemata, len(obj))
}
// clone other schemata, as other is about to be redeemed to the pool
r.fieldSchemata = append(r.fieldSchemata, fieldSchemata{
obj: obj,
field: field,
schemata: other.rootObjectSchemata,
schemata: other.rootObjectSchemata.Clone(),
})
}
if other.wantsRedeemOnMerge {
pools.poolOfResults.RedeemResult(other)
}
return r
}
@ -210,29 +222,38 @@ func (r *Result) mergeForSlice(slice reflect.Value, i int, other *Result) *Resul
if r.itemSchemata == nil {
r.itemSchemata = make([]itemSchemata, slice.Len())
}
// clone other schemata, as other is about to be redeemed to the pool
r.itemSchemata = append(r.itemSchemata, itemSchemata{
slice: slice,
index: i,
schemata: other.rootObjectSchemata,
schemata: other.rootObjectSchemata.Clone(),
})
}
if other.wantsRedeemOnMerge {
pools.poolOfResults.RedeemResult(other)
}
return r
}
// addRootObjectSchemata adds the given schemata for the root object of the result.
// The slice schemata might be reused. I.e. do not modify it after being added to a result.
//
// Since the slice schemata might be reused, it is shallow-cloned before saving it into the result.
func (r *Result) addRootObjectSchemata(s *spec.Schema) {
r.rootObjectSchemata.Append(schemata{one: s})
clone := *s
r.rootObjectSchemata.Append(schemata{one: &clone})
}
// addPropertySchemata adds the given schemata for the object and field.
// The slice schemata might be reused. I.e. do not modify it after being added to a result.
//
// Since the slice schemata might be reused, it is shallow-cloned before saving it into the result.
func (r *Result) addPropertySchemata(obj map[string]interface{}, fld string, schema *spec.Schema) {
if r.fieldSchemata == nil {
r.fieldSchemata = make([]fieldSchemata, 0, len(obj))
}
r.fieldSchemata = append(r.fieldSchemata, fieldSchemata{obj: obj, field: fld, schemata: schemata{one: schema}})
clone := *schema
r.fieldSchemata = append(r.fieldSchemata, fieldSchemata{obj: obj, field: fld, schemata: schemata{one: &clone}})
}
/*
@ -255,17 +276,21 @@ func (r *Result) mergeWithoutRootSchemata(other *Result) {
if other.fieldSchemata != nil {
if r.fieldSchemata == nil {
r.fieldSchemata = other.fieldSchemata
} else {
r.fieldSchemata = append(r.fieldSchemata, other.fieldSchemata...)
r.fieldSchemata = make([]fieldSchemata, 0, len(other.fieldSchemata))
}
for _, field := range other.fieldSchemata {
field.schemata = field.schemata.Clone()
r.fieldSchemata = append(r.fieldSchemata, field)
}
}
if other.itemSchemata != nil {
if r.itemSchemata == nil {
r.itemSchemata = other.itemSchemata
} else {
r.itemSchemata = append(r.itemSchemata, other.itemSchemata...)
r.itemSchemata = make([]itemSchemata, 0, len(other.itemSchemata))
}
for _, field := range other.itemSchemata {
field.schemata = field.schemata.Clone()
r.itemSchemata = append(r.itemSchemata, field)
}
}
}
@ -280,6 +305,9 @@ func (r *Result) MergeAsErrors(others ...*Result) *Result {
r.AddErrors(other.Errors...)
r.AddErrors(other.Warnings...)
r.MatchCount += other.MatchCount
if other.wantsRedeemOnMerge {
pools.poolOfResults.RedeemResult(other)
}
}
}
return r
@ -295,6 +323,9 @@ func (r *Result) MergeAsWarnings(others ...*Result) *Result {
r.AddWarnings(other.Errors...)
r.AddWarnings(other.Warnings...)
r.MatchCount += other.MatchCount
if other.wantsRedeemOnMerge {
pools.poolOfResults.RedeemResult(other)
}
}
}
return r
@ -356,16 +387,21 @@ func (r *Result) keepRelevantErrors() *Result {
strippedErrors := []error{}
for _, e := range r.Errors {
if strings.HasPrefix(e.Error(), "IMPORTANT!") {
strippedErrors = append(strippedErrors, fmt.Errorf(strings.TrimPrefix(e.Error(), "IMPORTANT!")))
strippedErrors = append(strippedErrors, stderrors.New(strings.TrimPrefix(e.Error(), "IMPORTANT!")))
}
}
strippedWarnings := []error{}
for _, e := range r.Warnings {
if strings.HasPrefix(e.Error(), "IMPORTANT!") {
strippedWarnings = append(strippedWarnings, fmt.Errorf(strings.TrimPrefix(e.Error(), "IMPORTANT!")))
strippedWarnings = append(strippedWarnings, stderrors.New(strings.TrimPrefix(e.Error(), "IMPORTANT!")))
}
}
strippedResult := new(Result)
var strippedResult *Result
if r.wantsRedeemOnMerge {
strippedResult = pools.poolOfResults.BorrowResult()
} else {
strippedResult = new(Result)
}
strippedResult.Errors = strippedErrors
strippedResult.Warnings = strippedWarnings
return strippedResult
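
The switch from fmt.Errorf to the standard library's errors.New is not cosmetic: the stripped message is arbitrary text, and passing it as a printf format string mangles any '%' it contains. A standalone illustration:

package main

import (
	"errors"
	"fmt"
)

func main() {
	msg := "progress: 50% done"
	fmt.Println(fmt.Errorf(msg)) // "progress: 50%!d(MISSING)" – msg parsed as a format string
	fmt.Println(errors.New(msg)) // "progress: 50% done" – used verbatim
}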
@ -427,6 +463,27 @@ func (r *Result) AsError() error {
return errors.CompositeValidationError(r.Errors...)
}
func (r *Result) cleared() *Result {
// clear the Result to be reusable. Keep allocated capacity.
r.Errors = r.Errors[:0]
r.Warnings = r.Warnings[:0]
r.MatchCount = 0
r.data = nil
r.rootObjectSchemata.one = nil
r.rootObjectSchemata.multiple = r.rootObjectSchemata.multiple[:0]
r.fieldSchemata = r.fieldSchemata[:0]
r.itemSchemata = r.itemSchemata[:0]
for k := range r.cachedFieldSchemata {
delete(r.cachedFieldSchemata, k)
}
for k := range r.cachedItemSchemata {
delete(r.cachedItemSchemata, k)
}
r.wantsRedeemOnMerge = true // mark this result as eligible for redeem when merged into another
return r
}
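
cleared() relies on the usual truncate-in-place idiom: reslicing to [:0] drops the length but keeps the backing array, so a recycled Result can grow again without reallocating. In isolation:

package main

import "fmt"

func main() {
	errs := make([]error, 0, 32)
	errs = append(errs, fmt.Errorf("boom"))
	errs = errs[:0]                   // length 0, capacity preserved
	fmt.Println(len(errs), cap(errs)) // 0 32
}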
// schemata is an arbitrary number of schemata. It does a distinction between zero,
// one and many schemata to avoid slice allocations.
type schemata struct {
@ -453,7 +510,7 @@ func (s *schemata) Slice() []*spec.Schema {
return s.multiple
}
// appendSchemata appends the schemata in other to s. It mutated s in-place.
// appendSchemata appends the schemata in other to s. It mutates s in-place.
func (s *schemata) Append(other schemata) {
if other.one == nil && len(other.multiple) == 0 {
return
@ -484,3 +541,23 @@ func (s *schemata) Append(other schemata) {
}
}
}
func (s schemata) Clone() schemata {
var clone schemata
if s.one != nil {
clone.one = new(spec.Schema)
*clone.one = *s.one
}
if len(s.multiple) > 0 {
clone.multiple = make([]*spec.Schema, len(s.multiple))
for idx := 0; idx < len(s.multiple); idx++ {
sp := new(spec.Schema)
*sp = *s.multiple[idx]
clone.multiple[idx] = sp
}
}
return clone
}
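
Clone exists because pooled *spec.Schema values are rewritten by the next borrower, so a Result holding the raw pointer would see its saved schema change underneath it. The copy is shallow (nested slices and maps stay shared), matching how addPropertySchemata uses it. A hypothetical internal sketch of the hazard it prevents:

// saveSchema is a hypothetical internal helper.
func saveSchema(propertySchema spec.Schema) schemata {
	s := pools.poolOfSchemas.BorrowSchema()
	*s = propertySchema                 // populate the recycled object
	saved := schemata{one: s}.Clone()   // shallow copy taken at save time
	pools.poolOfSchemas.RedeemSchema(s) // s may now be reused and overwritten
	return saved                        // still holds the values observed above
}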

vendor/github.com/go-openapi/validate/schema.go

@ -24,32 +24,32 @@ import (
"github.com/go-openapi/swag"
)
var (
specSchemaType = reflect.TypeOf(&spec.Schema{})
specParameterType = reflect.TypeOf(&spec.Parameter{})
specHeaderType = reflect.TypeOf(&spec.Header{})
// specItemsType = reflect.TypeOf(&spec.Items{})
)
// SchemaValidator validates data against a JSON schema
type SchemaValidator struct {
Path string
in string
Schema *spec.Schema
validators []valueValidator
validators [8]valueValidator
Root interface{}
KnownFormats strfmt.Registry
Options SchemaValidatorOptions
Options *SchemaValidatorOptions
}
// AgainstSchema validates the specified data against the provided schema, using a registry of supported formats.
//
// When no pre-parsed *spec.Schema structure is provided, it uses a JSON schema as default. See example.
func AgainstSchema(schema *spec.Schema, data interface{}, formats strfmt.Registry, options ...Option) error {
res := NewSchemaValidator(schema, nil, "", formats, options...).Validate(data)
res := NewSchemaValidator(schema, nil, "", formats,
append(options, WithRecycleValidators(true), withRecycleResults(true))...,
).Validate(data)
defer func() {
pools.poolOfResults.RedeemResult(res)
}()
if res.HasErrors() {
return errors.CompositeValidationError(res.Errors...)
}
return nil
}
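
Caller-facing behavior is unchanged: AgainstSchema opts into recycling internally and redeems the pooled result before returning, so the one-shot API stays safe. Typical usage (a sketch assuming strfmt.Default as the format registry):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/validate"
)

func main() {
	var schema spec.Schema
	_ = json.Unmarshal([]byte(`{"type": "integer", "minimum": 0}`), &schema)

	if err := validate.AgainstSchema(&schema, -3, strfmt.Default); err != nil {
		fmt.Println(err) // -3 violates minimum: 0
	}
}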
@ -57,6 +57,15 @@ func AgainstSchema(schema *spec.Schema, data interface{}, formats strfmt.Registr
//
// Panics if the provided schema is invalid.
func NewSchemaValidator(schema *spec.Schema, rootSchema interface{}, root string, formats strfmt.Registry, options ...Option) *SchemaValidator {
opts := new(SchemaValidatorOptions)
for _, o := range options {
o(opts)
}
return newSchemaValidator(schema, rootSchema, root, formats, opts)
}
func newSchemaValidator(schema *spec.Schema, rootSchema interface{}, root string, formats strfmt.Registry, opts *SchemaValidatorOptions) *SchemaValidator {
if schema == nil {
return nil
}
@ -72,17 +81,26 @@ func NewSchemaValidator(schema *spec.Schema, rootSchema interface{}, root string
panic(msg)
}
}
s := SchemaValidator{
Path: root,
in: "body",
Schema: schema,
Root: rootSchema,
KnownFormats: formats,
Options: SchemaValidatorOptions{}}
for _, o := range options {
o(&s.Options)
if opts == nil {
opts = new(SchemaValidatorOptions)
}
s.validators = []valueValidator{
var s *SchemaValidator
if opts.recycleValidators {
s = pools.poolOfSchemaValidators.BorrowValidator()
} else {
s = new(SchemaValidator)
}
s.Path = root
s.in = "body"
s.Schema = schema
s.Root = rootSchema
s.Options = opts
s.KnownFormats = formats
s.validators = [8]valueValidator{
s.typeValidator(),
s.schemaPropsValidator(),
s.stringValidator(),
@ -92,7 +110,8 @@ func NewSchemaValidator(schema *spec.Schema, rootSchema interface{}, root string
s.commonValidator(),
s.objectValidator(),
}
return &s
return s
}
// SetPath sets the path for this schema validator
@ -108,17 +127,39 @@ func (s *SchemaValidator) Applies(source interface{}, _ reflect.Kind) bool {
// Validate validates the data against the schema
func (s *SchemaValidator) Validate(data interface{}) *Result {
result := &Result{data: data}
if s == nil {
return result
return emptyResult
}
if s.Schema != nil {
if s.Options.recycleValidators {
defer func() {
s.redeemChildren()
s.redeem() // one-time use validator
}()
}
var result *Result
if s.Options.recycleResult {
result = pools.poolOfResults.BorrowResult()
result.data = data
} else {
result = &Result{data: data}
}
if s.Schema != nil && !s.Options.skipSchemataResult {
result.addRootObjectSchemata(s.Schema)
}
if data == nil {
// early exit with minimal validation
result.Merge(s.validators[0].Validate(data)) // type validator
result.Merge(s.validators[6].Validate(data)) // common validator
if s.Options.recycleValidators {
s.validators[0] = nil
s.validators[6] = nil
}
return result
}
@ -147,6 +188,7 @@ func (s *SchemaValidator) Validate(data interface{}) *Result {
if erri != nil {
result.AddErrors(invalidTypeConversionMsg(s.Path, erri))
result.Inc()
return result
}
d = in
@ -155,6 +197,7 @@ func (s *SchemaValidator) Validate(data interface{}) *Result {
if errf != nil {
result.AddErrors(invalidTypeConversionMsg(s.Path, errf))
result.Inc()
return result
}
d = nf
@ -164,14 +207,26 @@ func (s *SchemaValidator) Validate(data interface{}) *Result {
kind = tpe.Kind()
}
for _, v := range s.validators {
for idx, v := range s.validators {
if !v.Applies(s.Schema, kind) {
debugLog("%T does not apply for %v", v, kind)
if s.Options.recycleValidators {
// Validate won't be called, so relinquish this validator
if redeemableChildren, ok := v.(interface{ redeemChildren() }); ok {
redeemableChildren.redeemChildren()
}
if redeemable, ok := v.(interface{ redeem() }); ok {
redeemable.redeem()
}
s.validators[idx] = nil // prevents further (unsafe) usage
}
continue
}
err := v.Validate(d)
result.Merge(err)
result.Merge(v.Validate(d))
if s.Options.recycleValidators {
s.validators[idx] = nil // prevents further (unsafe) usage
}
result.Inc()
}
result.Inc()
@ -180,81 +235,120 @@ func (s *SchemaValidator) Validate(data interface{}) *Result {
}
func (s *SchemaValidator) typeValidator() valueValidator {
return &typeValidator{Type: s.Schema.Type, Nullable: s.Schema.Nullable, Format: s.Schema.Format, In: s.in, Path: s.Path}
return newTypeValidator(
s.Path,
s.in,
s.Schema.Type,
s.Schema.Nullable,
s.Schema.Format,
s.Options,
)
}
func (s *SchemaValidator) commonValidator() valueValidator {
return &basicCommonValidator{
Path: s.Path,
In: s.in,
Enum: s.Schema.Enum,
}
return newBasicCommonValidator(
s.Path,
s.in,
s.Schema.Default,
s.Schema.Enum,
s.Options,
)
}
func (s *SchemaValidator) sliceValidator() valueValidator {
return &schemaSliceValidator{
Path: s.Path,
In: s.in,
MaxItems: s.Schema.MaxItems,
MinItems: s.Schema.MinItems,
UniqueItems: s.Schema.UniqueItems,
AdditionalItems: s.Schema.AdditionalItems,
Items: s.Schema.Items,
Root: s.Root,
KnownFormats: s.KnownFormats,
Options: s.Options,
}
return newSliceValidator(
s.Path,
s.in,
s.Schema.MaxItems,
s.Schema.MinItems,
s.Schema.UniqueItems,
s.Schema.AdditionalItems,
s.Schema.Items,
s.Root,
s.KnownFormats,
s.Options,
)
}
func (s *SchemaValidator) numberValidator() valueValidator {
return &numberValidator{
Path: s.Path,
In: s.in,
Default: s.Schema.Default,
MultipleOf: s.Schema.MultipleOf,
Maximum: s.Schema.Maximum,
ExclusiveMaximum: s.Schema.ExclusiveMaximum,
Minimum: s.Schema.Minimum,
ExclusiveMinimum: s.Schema.ExclusiveMinimum,
}
return newNumberValidator(
s.Path,
s.in,
s.Schema.Default,
s.Schema.MultipleOf,
s.Schema.Maximum,
s.Schema.ExclusiveMaximum,
s.Schema.Minimum,
s.Schema.ExclusiveMinimum,
"",
"",
s.Options,
)
}
func (s *SchemaValidator) stringValidator() valueValidator {
return &stringValidator{
Path: s.Path,
In: s.in,
MaxLength: s.Schema.MaxLength,
MinLength: s.Schema.MinLength,
Pattern: s.Schema.Pattern,
}
return newStringValidator(
s.Path,
s.in,
nil,
false,
false,
s.Schema.MaxLength,
s.Schema.MinLength,
s.Schema.Pattern,
s.Options,
)
}
func (s *SchemaValidator) formatValidator() valueValidator {
return &formatValidator{
Path: s.Path,
In: s.in,
Format: s.Schema.Format,
KnownFormats: s.KnownFormats,
}
return newFormatValidator(
s.Path,
s.in,
s.Schema.Format,
s.KnownFormats,
s.Options,
)
}
func (s *SchemaValidator) schemaPropsValidator() valueValidator {
sch := s.Schema
return newSchemaPropsValidator(s.Path, s.in, sch.AllOf, sch.OneOf, sch.AnyOf, sch.Not, sch.Dependencies, s.Root, s.KnownFormats, s.Options.Options()...)
return newSchemaPropsValidator(
s.Path, s.in, sch.AllOf, sch.OneOf, sch.AnyOf, sch.Not, sch.Dependencies, s.Root, s.KnownFormats,
s.Options,
)
}
func (s *SchemaValidator) objectValidator() valueValidator {
return &objectValidator{
Path: s.Path,
In: s.in,
MaxProperties: s.Schema.MaxProperties,
MinProperties: s.Schema.MinProperties,
Required: s.Schema.Required,
Properties: s.Schema.Properties,
AdditionalProperties: s.Schema.AdditionalProperties,
PatternProperties: s.Schema.PatternProperties,
Root: s.Root,
KnownFormats: s.KnownFormats,
Options: s.Options,
return newObjectValidator(
s.Path,
s.in,
s.Schema.MaxProperties,
s.Schema.MinProperties,
s.Schema.Required,
s.Schema.Properties,
s.Schema.AdditionalProperties,
s.Schema.PatternProperties,
s.Root,
s.KnownFormats,
s.Options,
)
}
func (s *SchemaValidator) redeem() {
pools.poolOfSchemaValidators.RedeemValidator(s)
}
func (s *SchemaValidator) redeemChildren() {
for i, validator := range s.validators {
if validator == nil {
continue
}
if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok {
redeemableChildren.redeemChildren()
}
if redeemable, ok := validator.(interface{ redeem() }); ok {
redeemable.redeem()
}
s.validators[i] = nil // free up allocated children if not in pool
}
}
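
redeemChildren discovers recyclable children through ad-hoc interface assertions rather than a named interface, so validators that pool nothing need no extra methods. The pattern in isolation (a generic sketch, not the vendored code):

func recycleAll(children []interface{}) {
	for i, c := range children {
		if rc, ok := c.(interface{ redeemChildren() }); ok {
			rc.redeemChildren() // depth-first: release grandchildren first
		}
		if r, ok := c.(interface{ redeem() }); ok {
			r.redeem()
		}
		children[i] = nil // drop the reference so it cannot be reused
	}
}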

vendor/github.com/go-openapi/validate/schema_option.go

@ -18,6 +18,9 @@ package validate
type SchemaValidatorOptions struct {
EnableObjectArrayTypeCheck bool
EnableArrayMustHaveItemsCheck bool
recycleValidators bool
recycleResult bool
skipSchemataResult bool
}
// Option sets optional rules for schema validation
@ -45,10 +48,36 @@ func SwaggerSchema(enable bool) Option {
}
}
// Options returns current options
// WithRecycleValidators saves memory allocations and makes validators
// available for a single use of Validate() only.
//
// When a validator is recycled, the caller MUST NOT call the Validate() method twice.
func WithRecycleValidators(enable bool) Option {
return func(svo *SchemaValidatorOptions) {
svo.recycleValidators = enable
}
}
func withRecycleResults(enable bool) Option {
return func(svo *SchemaValidatorOptions) {
svo.recycleResult = enable
}
}
// WithSkipSchemataResult skips the deep audit payload stored in validation Result
func WithSkipSchemataResult(enable bool) Option {
return func(svo *SchemaValidatorOptions) {
svo.skipSchemataResult = enable
}
}
// Options returns the current set of options
func (svo SchemaValidatorOptions) Options() []Option {
return []Option{
EnableObjectArrayTypeCheck(svo.EnableObjectArrayTypeCheck),
EnableArrayMustHaveItemsCheck(svo.EnableArrayMustHaveItemsCheck),
WithRecycleValidators(svo.recycleValidators),
withRecycleResults(svo.recycleResult),
WithSkipSchemataResult(svo.skipSchemataResult),
}
}
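
Composing the new options on a hand-built validator (a sketch; per the doc comment above, WithRecycleValidators makes the validator strictly single-use):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/validate"
)

func main() {
	var schema spec.Schema
	_ = json.Unmarshal([]byte(`{"type": "string", "minLength": 3}`), &schema)

	v := validate.NewSchemaValidator(&schema, nil, "", strfmt.Default,
		validate.WithRecycleValidators(true),  // pooled internals, single use
		validate.WithSkipSchemataResult(true), // skip the deep schemata audit
	)
	res := v.Validate("ab") // v must not be reused after this call
	fmt.Println(res.IsValid(), res.Errors)
}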

vendor/github.com/go-openapi/validate/schema_props.go

@ -30,195 +30,279 @@ type schemaPropsValidator struct {
AnyOf []spec.Schema
Not *spec.Schema
Dependencies spec.Dependencies
anyOfValidators []SchemaValidator
allOfValidators []SchemaValidator
oneOfValidators []SchemaValidator
anyOfValidators []*SchemaValidator
allOfValidators []*SchemaValidator
oneOfValidators []*SchemaValidator
notValidator *SchemaValidator
Root interface{}
KnownFormats strfmt.Registry
Options SchemaValidatorOptions
Options *SchemaValidatorOptions
}
func (s *schemaPropsValidator) SetPath(path string) {
s.Path = path
}
func newSchemaPropsValidator(path string, in string, allOf, oneOf, anyOf []spec.Schema, not *spec.Schema, deps spec.Dependencies, root interface{}, formats strfmt.Registry, options ...Option) *schemaPropsValidator {
anyValidators := make([]SchemaValidator, 0, len(anyOf))
for _, v := range anyOf {
v := v
anyValidators = append(anyValidators, *NewSchemaValidator(&v, root, path, formats, options...))
func newSchemaPropsValidator(
path string, in string, allOf, oneOf, anyOf []spec.Schema, not *spec.Schema, deps spec.Dependencies, root interface{}, formats strfmt.Registry,
opts *SchemaValidatorOptions) *schemaPropsValidator {
if opts == nil {
opts = new(SchemaValidatorOptions)
}
allValidators := make([]SchemaValidator, 0, len(allOf))
for _, v := range allOf {
v := v
allValidators = append(allValidators, *NewSchemaValidator(&v, root, path, formats, options...))
anyValidators := make([]*SchemaValidator, 0, len(anyOf))
for i := range anyOf {
anyValidators = append(anyValidators, newSchemaValidator(&anyOf[i], root, path, formats, opts))
}
oneValidators := make([]SchemaValidator, 0, len(oneOf))
for _, v := range oneOf {
v := v
oneValidators = append(oneValidators, *NewSchemaValidator(&v, root, path, formats, options...))
allValidators := make([]*SchemaValidator, 0, len(allOf))
for i := range allOf {
allValidators = append(allValidators, newSchemaValidator(&allOf[i], root, path, formats, opts))
}
oneValidators := make([]*SchemaValidator, 0, len(oneOf))
for i := range oneOf {
oneValidators = append(oneValidators, newSchemaValidator(&oneOf[i], root, path, formats, opts))
}
var notValidator *SchemaValidator
if not != nil {
notValidator = NewSchemaValidator(not, root, path, formats, options...)
notValidator = newSchemaValidator(not, root, path, formats, opts)
}
schOptions := &SchemaValidatorOptions{}
for _, o := range options {
o(schOptions)
}
return &schemaPropsValidator{
Path: path,
In: in,
AllOf: allOf,
OneOf: oneOf,
AnyOf: anyOf,
Not: not,
Dependencies: deps,
anyOfValidators: anyValidators,
allOfValidators: allValidators,
oneOfValidators: oneValidators,
notValidator: notValidator,
Root: root,
KnownFormats: formats,
Options: *schOptions,
}
var s *schemaPropsValidator
if opts.recycleValidators {
s = pools.poolOfSchemaPropsValidators.BorrowValidator()
} else {
s = new(schemaPropsValidator)
}
func (s *schemaPropsValidator) Applies(source interface{}, kind reflect.Kind) bool {
r := reflect.TypeOf(source) == specSchemaType
debugLog("schema props validator for %q applies %t for %T (kind: %v)\n", s.Path, r, source, kind)
return r
s.Path = path
s.In = in
s.AllOf = allOf
s.OneOf = oneOf
s.AnyOf = anyOf
s.Not = not
s.Dependencies = deps
s.anyOfValidators = anyValidators
s.allOfValidators = allValidators
s.oneOfValidators = oneValidators
s.notValidator = notValidator
s.Root = root
s.KnownFormats = formats
s.Options = opts
return s
}
func (s *schemaPropsValidator) Applies(source interface{}, _ reflect.Kind) bool {
_, isSchema := source.(*spec.Schema)
return isSchema
}
func (s *schemaPropsValidator) Validate(data interface{}) *Result {
mainResult := new(Result)
var mainResult *Result
if s.Options.recycleResult {
mainResult = pools.poolOfResults.BorrowResult()
} else {
mainResult = new(Result)
}
// Intermediary error results
// IMPORTANT! messages from underlying validators
keepResultAnyOf := new(Result)
keepResultOneOf := new(Result)
keepResultAllOf := new(Result)
var keepResultAnyOf, keepResultOneOf, keepResultAllOf *Result
if s.Options.recycleValidators {
defer func() {
s.redeemChildren()
s.redeem()
// results are redeemed when merged
}()
}
// Validates at least one in anyOf schemas
var firstSuccess *Result
if len(s.anyOfValidators) > 0 {
keepResultAnyOf = pools.poolOfResults.BorrowResult()
s.validateAnyOf(data, mainResult, keepResultAnyOf)
}
if len(s.oneOfValidators) > 0 {
keepResultOneOf = pools.poolOfResults.BorrowResult()
s.validateOneOf(data, mainResult, keepResultOneOf)
}
if len(s.allOfValidators) > 0 {
keepResultAllOf = pools.poolOfResults.BorrowResult()
s.validateAllOf(data, mainResult, keepResultAllOf)
}
if s.notValidator != nil {
s.validateNot(data, mainResult)
}
if s.Dependencies != nil && len(s.Dependencies) > 0 && reflect.TypeOf(data).Kind() == reflect.Map {
s.validateDependencies(data, mainResult)
}
mainResult.Inc()
// In the end we retain best failures for schema validation
// plus, if any, composite errors which may explain special cases (tagged as IMPORTANT!).
return mainResult.Merge(keepResultAllOf, keepResultOneOf, keepResultAnyOf)
}
func (s *schemaPropsValidator) validateAnyOf(data interface{}, mainResult, keepResultAnyOf *Result) {
// Validates at least one in anyOf schemas
var bestFailures *Result
succeededOnce := false
for _, anyOfSchema := range s.anyOfValidators {
for i, anyOfSchema := range s.anyOfValidators {
result := anyOfSchema.Validate(data)
if s.Options.recycleValidators {
s.anyOfValidators[i] = nil
}
// We keep inner IMPORTANT! errors no matter what MatchCount tells us
keepResultAnyOf.Merge(result.keepRelevantErrors())
keepResultAnyOf.Merge(result.keepRelevantErrors()) // merges (and redeems) a new instance of Result
if result.IsValid() {
bestFailures = nil
succeededOnce = true
if firstSuccess == nil {
firstSuccess = result
if bestFailures != nil && bestFailures.wantsRedeemOnMerge {
pools.poolOfResults.RedeemResult(bestFailures)
}
keepResultAnyOf = new(Result)
break
_ = keepResultAnyOf.cleared()
mainResult.Merge(result)
return
}
// MatchCount is used to select errors from the schema with most positive checks
if bestFailures == nil || result.MatchCount > bestFailures.MatchCount {
if bestFailures != nil && bestFailures.wantsRedeemOnMerge {
pools.poolOfResults.RedeemResult(bestFailures)
}
bestFailures = result
}
}
if !succeededOnce {
mainResult.AddErrors(mustValidateAtLeastOneSchemaMsg(s.Path))
}
if bestFailures != nil {
mainResult.Merge(bestFailures)
} else if firstSuccess != nil {
mainResult.Merge(firstSuccess)
}
}
// Validates exactly one in oneOf schemas
if len(s.oneOfValidators) > 0 {
var bestFailures *Result
var firstSuccess *Result
validated := 0
for _, oneOfSchema := range s.oneOfValidators {
result := oneOfSchema.Validate(data)
// We keep inner IMPORTANT! errors no matter what MatchCount tells us
keepResultOneOf.Merge(result.keepRelevantErrors())
if result.IsValid() {
validated++
bestFailures = nil
if firstSuccess == nil {
firstSuccess = result
}
keepResultOneOf = new(Result)
continue
}
// MatchCount is used to select errors from the schema with most positive checks
if validated == 0 && (bestFailures == nil || result.MatchCount > bestFailures.MatchCount) {
bestFailures = result
if result.wantsRedeemOnMerge {
pools.poolOfResults.RedeemResult(result) // this result is ditched
}
}
if validated != 1 {
var additionalMsg string
if validated == 0 {
additionalMsg = "Found none valid"
} else {
additionalMsg = fmt.Sprintf("Found %d valid alternatives", validated)
}
mainResult.AddErrors(mustValidateOnlyOneSchemaMsg(s.Path, additionalMsg))
if bestFailures != nil {
mainResult.AddErrors(mustValidateAtLeastOneSchemaMsg(s.Path))
mainResult.Merge(bestFailures)
}
} else if firstSuccess != nil {
func (s *schemaPropsValidator) validateOneOf(data interface{}, mainResult, keepResultOneOf *Result) {
// Validates exactly one in oneOf schemas
var (
firstSuccess, bestFailures *Result
validated int
)
for i, oneOfSchema := range s.oneOfValidators {
result := oneOfSchema.Validate(data)
if s.Options.recycleValidators {
s.oneOfValidators[i] = nil
}
// We keep inner IMPORTANT! errors no matter what MatchCount tells us
keepResultOneOf.Merge(result.keepRelevantErrors()) // merges (and redeems) a new instance of Result
if result.IsValid() {
validated++
_ = keepResultOneOf.cleared()
if firstSuccess == nil {
firstSuccess = result
} else if result.wantsRedeemOnMerge {
pools.poolOfResults.RedeemResult(result) // this result is ditched
}
continue
}
// MatchCount is used to select errors from the schema with most positive checks
if validated == 0 && (bestFailures == nil || result.MatchCount > bestFailures.MatchCount) {
if bestFailures != nil && bestFailures.wantsRedeemOnMerge {
pools.poolOfResults.RedeemResult(bestFailures)
}
bestFailures = result
} else if result.wantsRedeemOnMerge {
pools.poolOfResults.RedeemResult(result) // this result is ditched
}
}
switch validated {
case 0:
mainResult.AddErrors(mustValidateOnlyOneSchemaMsg(s.Path, "Found none valid"))
mainResult.Merge(bestFailures)
// firstSuccess is necessarily nil
case 1:
mainResult.Merge(firstSuccess)
if bestFailures != nil && bestFailures.wantsRedeemOnMerge {
pools.poolOfResults.RedeemResult(bestFailures)
}
default:
mainResult.AddErrors(mustValidateOnlyOneSchemaMsg(s.Path, fmt.Sprintf("Found %d valid alternatives", validated)))
mainResult.Merge(bestFailures)
if firstSuccess != nil && firstSuccess.wantsRedeemOnMerge {
pools.poolOfResults.RedeemResult(firstSuccess)
}
}
}
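
End to end, this accounting yields the two oneOf failure modes: zero matches and multiple matches. A sketch through the public API:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/validate"
)

func main() {
	var schema spec.Schema
	_ = json.Unmarshal([]byte(`{
		"oneOf": [
			{"type": "integer"},
			{"type": "integer", "minimum": 0}
		]
	}`), &schema)

	// 5 matches both alternatives: fails with "Found 2 valid alternatives"
	fmt.Println(validate.AgainstSchema(&schema, 5, strfmt.Default))
	// -1 matches exactly one alternative: valid, prints <nil>
	fmt.Println(validate.AgainstSchema(&schema, -1, strfmt.Default))
}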
func (s *schemaPropsValidator) validateAllOf(data interface{}, mainResult, keepResultAllOf *Result) {
// Validates all of allOf schemas
if len(s.allOfValidators) > 0 {
validated := 0
var validated int
for _, allOfSchema := range s.allOfValidators {
for i, allOfSchema := range s.allOfValidators {
result := allOfSchema.Validate(data)
if s.Options.recycleValidators {
s.allOfValidators[i] = nil
}
// We keep inner IMPORTANT! errors no matter what MatchCount tells us
keepResultAllOf.Merge(result.keepRelevantErrors())
// keepResultAllOf.Merge(result)
if result.IsValid() {
validated++
}
mainResult.Merge(result)
}
if validated != len(s.allOfValidators) {
additionalMsg := ""
if validated == 0 {
additionalMsg = ". None validated"
}
mainResult.AddErrors(mustValidateAllSchemasMsg(s.Path, additionalMsg))
switch validated {
case 0:
mainResult.AddErrors(mustValidateAllSchemasMsg(s.Path, ". None validated"))
case len(s.allOfValidators):
default:
mainResult.AddErrors(mustValidateAllSchemasMsg(s.Path, ""))
}
}
if s.notValidator != nil {
func (s *schemaPropsValidator) validateNot(data interface{}, mainResult *Result) {
result := s.notValidator.Validate(data)
if s.Options.recycleValidators {
s.notValidator = nil
}
// We keep inner IMPORTANT! errors no matter what MatchCount tells us
if result.IsValid() {
mainResult.AddErrors(mustNotValidatechemaMsg(s.Path))
}
if result.wantsRedeemOnMerge {
pools.poolOfResults.RedeemResult(result) // this result is ditched
}
}
if s.Dependencies != nil && len(s.Dependencies) > 0 && reflect.TypeOf(data).Kind() == reflect.Map {
func (s *schemaPropsValidator) validateDependencies(data interface{}, mainResult *Result) {
val := data.(map[string]interface{})
for key := range val {
if dep, ok := s.Dependencies[key]; ok {
dep, ok := s.Dependencies[key]
if !ok {
continue
}
if dep.Schema != nil {
mainResult.Merge(NewSchemaValidator(dep.Schema, s.Root, s.Path+"."+key, s.KnownFormats, s.Options.Options()...).Validate(data))
mainResult.Merge(
newSchemaValidator(dep.Schema, s.Root, s.Path+"."+key, s.KnownFormats, s.Options).Validate(data),
)
continue
}
@ -231,10 +315,42 @@ func (s *schemaPropsValidator) Validate(data interface{}) *Result {
}
}
}
func (s *schemaPropsValidator) redeem() {
pools.poolOfSchemaPropsValidators.RedeemValidator(s)
}
mainResult.Inc()
// In the end we retain best failures for schema validation
// plus, if any, composite errors which may explain special cases (tagged as IMPORTANT!).
return mainResult.Merge(keepResultAllOf, keepResultOneOf, keepResultAnyOf)
func (s *schemaPropsValidator) redeemChildren() {
for _, v := range s.anyOfValidators {
if v == nil {
continue
}
v.redeemChildren()
v.redeem()
}
s.anyOfValidators = nil
for _, v := range s.allOfValidators {
if v == nil {
continue
}
v.redeemChildren()
v.redeem()
}
s.allOfValidators = nil
for _, v := range s.oneOfValidators {
if v == nil {
continue
}
v.redeemChildren()
v.redeem()
}
s.oneOfValidators = nil
if s.notValidator != nil {
s.notValidator.redeemChildren()
s.notValidator.redeem()
s.notValidator = nil
}
}

Some files were not shown because too many files have changed in this diff.