Update dependencies

This change updates all dependencies.

Signed-off-by: Gabriel Adrian Samfira <gsamfira@cloudbasesolutions.com>
Gabriel Adrian Samfira 2025-02-24 07:59:10 +00:00
parent f2b43bac77
commit 5415121a70
289 changed files with 7700 additions and 3245 deletions


@ -1,115 +0,0 @@
// Copyright 2023 The go-github AUTHORS. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package github
import (
"context"
"fmt"
)
// GetAllOrganizationRulesets gets all the rulesets for the specified organization.
//
// GitHub API docs: https://docs.github.com/rest/orgs/rules#get-all-organization-repository-rulesets
//
//meta:operation GET /orgs/{org}/rulesets
func (s *OrganizationsService) GetAllOrganizationRulesets(ctx context.Context, org string) ([]*Ruleset, *Response, error) {
u := fmt.Sprintf("orgs/%v/rulesets", org)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
var rulesets []*Ruleset
resp, err := s.client.Do(ctx, req, &rulesets)
if err != nil {
return nil, resp, err
}
return rulesets, resp, nil
}
// CreateOrganizationRuleset creates a ruleset for the specified organization.
//
// GitHub API docs: https://docs.github.com/rest/orgs/rules#create-an-organization-repository-ruleset
//
//meta:operation POST /orgs/{org}/rulesets
func (s *OrganizationsService) CreateOrganizationRuleset(ctx context.Context, org string, rs *Ruleset) (*Ruleset, *Response, error) {
u := fmt.Sprintf("orgs/%v/rulesets", org)
req, err := s.client.NewRequest("POST", u, rs)
if err != nil {
return nil, nil, err
}
var ruleset *Ruleset
resp, err := s.client.Do(ctx, req, &ruleset)
if err != nil {
return nil, resp, err
}
return ruleset, resp, nil
}
// GetOrganizationRuleset gets a ruleset from the specified organization.
//
// GitHub API docs: https://docs.github.com/rest/orgs/rules#get-an-organization-repository-ruleset
//
//meta:operation GET /orgs/{org}/rulesets/{ruleset_id}
func (s *OrganizationsService) GetOrganizationRuleset(ctx context.Context, org string, rulesetID int64) (*Ruleset, *Response, error) {
u := fmt.Sprintf("orgs/%v/rulesets/%v", org, rulesetID)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
var ruleset *Ruleset
resp, err := s.client.Do(ctx, req, &ruleset)
if err != nil {
return nil, resp, err
}
return ruleset, resp, nil
}
// UpdateOrganizationRuleset updates a ruleset from the specified organization.
//
// GitHub API docs: https://docs.github.com/rest/orgs/rules#update-an-organization-repository-ruleset
//
//meta:operation PUT /orgs/{org}/rulesets/{ruleset_id}
func (s *OrganizationsService) UpdateOrganizationRuleset(ctx context.Context, org string, rulesetID int64, rs *Ruleset) (*Ruleset, *Response, error) {
u := fmt.Sprintf("orgs/%v/rulesets/%v", org, rulesetID)
req, err := s.client.NewRequest("PUT", u, rs)
if err != nil {
return nil, nil, err
}
var ruleset *Ruleset
resp, err := s.client.Do(ctx, req, &ruleset)
if err != nil {
return nil, resp, err
}
return ruleset, resp, nil
}
// DeleteOrganizationRuleset deletes a ruleset from the specified organization.
//
// GitHub API docs: https://docs.github.com/rest/orgs/rules#delete-an-organization-repository-ruleset
//
//meta:operation DELETE /orgs/{org}/rulesets/{ruleset_id}
func (s *OrganizationsService) DeleteOrganizationRuleset(ctx context.Context, org string, rulesetID int64) (*Response, error) {
u := fmt.Sprintf("orgs/%v/rulesets/%v", org, rulesetID)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
return s.client.Do(ctx, req, nil)
}
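
As a usage illustration (not part of the upstream file), the organization-level methods above can be called as shown below. This is a minimal sketch: the token and organization name are placeholders, and the import path assumes the pre-update module version whose files appear in this listing (v68, per the doc.go change later in this commit).

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/google/go-github/v68/github"
)

func main() {
    ctx := context.Background()
    // Hypothetical personal access token; substitute real credentials.
    client := github.NewClient(nil).WithAuthToken("ghp_example")

    // List every ruleset defined at the organization level.
    rulesets, _, err := client.Organizations.GetAllOrganizationRulesets(ctx, "my-org")
    if err != nil {
        log.Fatal(err)
    }
    for _, rs := range rulesets {
        fmt.Printf("ruleset %d: %s (%s)\n", rs.GetID(), rs.Name, rs.Enforcement)
    }
}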


@ -1,995 +0,0 @@
// Copyright 2023 The go-github AUTHORS. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package github
import (
"context"
"encoding/json"
"fmt"
)
// BypassActor represents the bypass actors from a ruleset.
type BypassActor struct {
ActorID *int64 `json:"actor_id,omitempty"`
// Possible values for ActorType are: RepositoryRole, Team, Integration, OrganizationAdmin
ActorType *string `json:"actor_type,omitempty"`
// Possible values for BypassMode are: always, pull_request
BypassMode *string `json:"bypass_mode,omitempty"`
}
// RulesetLink represents a single link object from GitHub ruleset request _links.
type RulesetLink struct {
HRef *string `json:"href,omitempty"`
}
// RulesetLinks represents the "_links" object in a Ruleset.
type RulesetLinks struct {
Self *RulesetLink `json:"self,omitempty"`
}
// RulesetRefConditionParameters represents the conditions object for ref_names.
type RulesetRefConditionParameters struct {
Include []string `json:"include"`
Exclude []string `json:"exclude"`
}
// RulesetRepositoryNamesConditionParameters represents the conditions object for repository_names.
type RulesetRepositoryNamesConditionParameters struct {
Include []string `json:"include"`
Exclude []string `json:"exclude"`
Protected *bool `json:"protected,omitempty"`
}
// RulesetRepositoryIDsConditionParameters represents the conditions object for repository_ids.
type RulesetRepositoryIDsConditionParameters struct {
RepositoryIDs []int64 `json:"repository_ids,omitempty"`
}
// RulesetRepositoryPropertyTargetParameters represents a repository_property name and values to be used for targeting.
type RulesetRepositoryPropertyTargetParameters struct {
Name string `json:"name"`
Values []string `json:"property_values"`
Source *string `json:"source,omitempty"`
}
// RulesetRepositoryPropertyConditionParameters represents the conditions object for repository_property.
type RulesetRepositoryPropertyConditionParameters struct {
Include []RulesetRepositoryPropertyTargetParameters `json:"include"`
Exclude []RulesetRepositoryPropertyTargetParameters `json:"exclude"`
}
// RulesetConditions represents the conditions object in a ruleset.
// Set at most one of RepositoryName, RepositoryID, or RepositoryProperty.
type RulesetConditions struct {
RefName *RulesetRefConditionParameters `json:"ref_name,omitempty"`
RepositoryName *RulesetRepositoryNamesConditionParameters `json:"repository_name,omitempty"`
RepositoryID *RulesetRepositoryIDsConditionParameters `json:"repository_id,omitempty"`
RepositoryProperty *RulesetRepositoryPropertyConditionParameters `json:"repository_property,omitempty"`
}
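
For illustration only (this helper is not part of the upstream file): per the comment above, combine RefName with at most one of the repository-targeting conditions. A minimal sketch using the package's Bool helper:

// exampleRulesetConditions is illustrative; the include patterns are arbitrary.
func exampleRulesetConditions() *RulesetConditions {
    return &RulesetConditions{
        RefName: &RulesetRefConditionParameters{
            Include: []string{"~DEFAULT_BRANCH"},
            Exclude: []string{},
        },
        // Only one of RepositoryName, RepositoryID, or RepositoryProperty is set.
        RepositoryName: &RulesetRepositoryNamesConditionParameters{
            Include:   []string{"prod-*"},
            Exclude:   []string{},
            Protected: Bool(true),
        },
    }
}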
// RulePatternParameters represents the rule pattern parameters.
type RulePatternParameters struct {
Name *string `json:"name,omitempty"`
// If Negate is true, the rule will fail if the pattern matches.
Negate *bool `json:"negate,omitempty"`
// Possible values for Operator are: starts_with, ends_with, contains, regex
Operator string `json:"operator"`
Pattern string `json:"pattern"`
}
// RuleFileParameters represents a list of file paths.
type RuleFileParameters struct {
RestrictedFilePaths *[]string `json:"restricted_file_paths"`
}
// RuleMaxFilePathLengthParameters represents the max_file_path_length rule parameters.
type RuleMaxFilePathLengthParameters struct {
MaxFilePathLength int `json:"max_file_path_length"`
}
// RuleFileExtensionRestrictionParameters represents the file_extension_restriction rule parameters.
type RuleFileExtensionRestrictionParameters struct {
RestrictedFileExtensions []string `json:"restricted_file_extensions"`
}
// RuleMaxFileSizeParameters represents the max_file_size rule parameters.
type RuleMaxFileSizeParameters struct {
MaxFileSize int64 `json:"max_file_size"`
}
// UpdateAllowsFetchAndMergeRuleParameters represents the update rule parameters.
type UpdateAllowsFetchAndMergeRuleParameters struct {
UpdateAllowsFetchAndMerge bool `json:"update_allows_fetch_and_merge"`
}
// RequiredDeploymentEnvironmentsRuleParameters represents the required_deployments rule parameters.
type RequiredDeploymentEnvironmentsRuleParameters struct {
RequiredDeploymentEnvironments []string `json:"required_deployment_environments"`
}
// PullRequestRuleParameters represents the pull_request rule parameters.
type PullRequestRuleParameters struct {
DismissStaleReviewsOnPush bool `json:"dismiss_stale_reviews_on_push"`
RequireCodeOwnerReview bool `json:"require_code_owner_review"`
RequireLastPushApproval bool `json:"require_last_push_approval"`
RequiredApprovingReviewCount int `json:"required_approving_review_count"`
RequiredReviewThreadResolution bool `json:"required_review_thread_resolution"`
}
// RuleRequiredStatusChecks represents the RequiredStatusChecks for the RequiredStatusChecksRuleParameters object.
type RuleRequiredStatusChecks struct {
Context string `json:"context"`
IntegrationID *int64 `json:"integration_id,omitempty"`
}
// MergeQueueRuleParameters represents the merge_queue rule parameters.
type MergeQueueRuleParameters struct {
CheckResponseTimeoutMinutes int `json:"check_response_timeout_minutes"`
// Possible values for GroupingStrategy are: ALLGREEN, HEADGREEN
GroupingStrategy string `json:"grouping_strategy"`
MaxEntriesToBuild int `json:"max_entries_to_build"`
MaxEntriesToMerge int `json:"max_entries_to_merge"`
// Possible values for MergeMethod are: MERGE, SQUASH, REBASE
MergeMethod string `json:"merge_method"`
MinEntriesToMerge int `json:"min_entries_to_merge"`
MinEntriesToMergeWaitMinutes int `json:"min_entries_to_merge_wait_minutes"`
}
// RequiredStatusChecksRuleParameters represents the required_status_checks rule parameters.
type RequiredStatusChecksRuleParameters struct {
DoNotEnforceOnCreate *bool `json:"do_not_enforce_on_create,omitempty"`
RequiredStatusChecks []RuleRequiredStatusChecks `json:"required_status_checks"`
StrictRequiredStatusChecksPolicy bool `json:"strict_required_status_checks_policy"`
}
// RuleRequiredWorkflow represents the Workflow for the RequiredWorkflowsRuleParameters object.
type RuleRequiredWorkflow struct {
Path string `json:"path"`
Ref *string `json:"ref,omitempty"`
RepositoryID *int64 `json:"repository_id,omitempty"`
Sha *string `json:"sha,omitempty"`
}
// RequiredWorkflowsRuleParameters represents the workflows rule parameters.
type RequiredWorkflowsRuleParameters struct {
DoNotEnforceOnCreate bool `json:"do_not_enforce_on_create,omitempty"`
RequiredWorkflows []*RuleRequiredWorkflow `json:"workflows"`
}
// RuleRequiredCodeScanningTool represents a single required code-scanning tool for the RequiredCodeScanningRuleParameters object.
type RuleRequiredCodeScanningTool struct {
AlertsThreshold string `json:"alerts_threshold"`
SecurityAlertsThreshold string `json:"security_alerts_threshold"`
Tool string `json:"tool"`
}
// RequiredCodeScanningRuleParameters represents the code_scanning rule parameters.
type RequiredCodeScanningRuleParameters struct {
RequiredCodeScanningTools []*RuleRequiredCodeScanningTool `json:"code_scanning_tools"`
}
// RepositoryRule represents a GitHub Rule.
type RepositoryRule struct {
Type string `json:"type"`
Parameters *json.RawMessage `json:"parameters,omitempty"`
RulesetSourceType string `json:"ruleset_source_type"`
RulesetSource string `json:"ruleset_source"`
RulesetID int64 `json:"ruleset_id"`
}
// RepositoryRulesetEditedChanges represents the changes made to a repository ruleset.
type RepositoryRulesetEditedChanges struct {
Name *RepositoryRulesetEditedSource `json:"name,omitempty"`
Enforcement *RepositoryRulesetEditedSource `json:"enforcement,omitempty"`
Conditions *RepositoryRulesetEditedConditions `json:"conditions,omitempty"`
Rules *RepositoryRulesetEditedRules `json:"rules,omitempty"`
}
// RepositoryRulesetEditedSource represents a source change for the ruleset.
type RepositoryRulesetEditedSource struct {
From *string `json:"from,omitempty"`
}
// RepositoryRulesetEditedSources represents multiple source changes for the ruleset.
type RepositoryRulesetEditedSources struct {
From []string `json:"from,omitempty"`
}
// RepositoryRulesetEditedConditions holds changes to conditions in a ruleset.
type RepositoryRulesetEditedConditions struct {
Added []*RepositoryRulesetRefCondition `json:"added,omitempty"`
Deleted []*RepositoryRulesetRefCondition `json:"deleted,omitempty"`
Updated []*RepositoryRulesetEditedUpdatedConditions `json:"updated,omitempty"`
}
// RepositoryRulesetEditedRules holds changes to rules in a ruleset.
type RepositoryRulesetEditedRules struct {
Added []*RepositoryRulesetRule `json:"added,omitempty"`
Deleted []*RepositoryRulesetRule `json:"deleted,omitempty"`
Updated []*RepositoryRulesetUpdatedRules `json:"updated,omitempty"`
}
// RepositoryRulesetRefCondition represents a reference condition for the ruleset.
type RepositoryRulesetRefCondition struct {
RefName *RulesetRefConditionParameters `json:"ref_name,omitempty"`
}
// RepositoryRulesetEditedUpdatedConditions holds updates to conditions in a ruleset.
type RepositoryRulesetEditedUpdatedConditions struct {
Condition *RepositoryRulesetRefCondition `json:"condition,omitempty"`
Changes *RepositoryRulesetUpdatedConditionsEdited `json:"changes,omitempty"`
}
// RepositoryRulesetUpdatedConditionsEdited holds the edited updates to conditions in a ruleset.
type RepositoryRulesetUpdatedConditionsEdited struct {
ConditionType *RepositoryRulesetEditedSource `json:"condition_type,omitempty"`
Target *RepositoryRulesetEditedSource `json:"target,omitempty"`
Include *RepositoryRulesetEditedSources `json:"include,omitempty"`
Exclude *RepositoryRulesetEditedSources `json:"exclude,omitempty"`
}
// RepositoryRulesetUpdatedRules holds updates to rules in a ruleset.
type RepositoryRulesetUpdatedRules struct {
Rule *RepositoryRulesetRule `json:"rule,omitempty"`
Changes *RepositoryRulesetEditedRuleChanges `json:"changes,omitempty"`
}
// RepositoryRulesetEditedRuleChanges holds changes made to a rule in a ruleset.
type RepositoryRulesetEditedRuleChanges struct {
Configuration *RepositoryRulesetEditedSources `json:"configuration,omitempty"`
RuleType *RepositoryRulesetEditedSources `json:"rule_type,omitempty"`
Pattern *RepositoryRulesetEditedSources `json:"pattern,omitempty"`
}
// RepositoryRuleset represents the structure of a ruleset associated with a GitHub repository.
type RepositoryRuleset struct {
ID int64 `json:"id"`
Name string `json:"name"`
// Possible values for target: "branch", "tag", "push"
Target *string `json:"target,omitempty"`
// Possible values for source type: "Repository", "Organization"
SourceType *string `json:"source_type,omitempty"`
Source string `json:"source"`
// Possible values for enforcement: "disabled", "active", "evaluate"
Enforcement string `json:"enforcement"`
BypassActors []*BypassActor `json:"bypass_actors,omitempty"`
// Possible values for current user can bypass: "always", "pull_requests_only", "never"
CurrentUserCanBypass *string `json:"current_user_can_bypass,omitempty"`
NodeID *string `json:"node_id,omitempty"`
Links *RepositoryRulesetLink `json:"_links,omitempty"`
Conditions json.RawMessage `json:"conditions,omitempty"`
Rules []*RepositoryRulesetRule `json:"rules,omitempty"`
CreatedAt *Timestamp `json:"created_at,omitempty"`
UpdatedAt *Timestamp `json:"updated_at,omitempty"`
}
// RepositoryRulesetRule represents individual rules which are present in a repository's ruleset.
type RepositoryRulesetRule struct {
Creation *RepositoryRulesetRuleType `json:"creation,omitempty"`
Update *RepositoryRulesetUpdateRule `json:"update,omitempty"`
Deletion *RepositoryRulesetRuleType `json:"deletion,omitempty"`
RequiredLinearHistory *RepositoryRulesetRuleType `json:"required_linear_history,omitempty"`
MergeQueue *RepositoryRulesetMergeQueueRule `json:"merge_queue,omitempty"`
RequiredDeployments *RepositoryRulesetRequiredDeploymentsRule `json:"required_deployments,omitempty"`
RequiredSignatures *RepositoryRulesetRuleType `json:"required_signatures,omitempty"`
PullRequest *RepositoryRulesetPullRequestRule `json:"pull_request,omitempty"`
RequiredStatusChecks *RepositoryRulesetRequiredStatusChecksRule `json:"required_status_checks,omitempty"`
NonFastForward *RepositoryRulesetRuleType `json:"non_fast_forward,omitempty"`
CommitMessagePattern *RepositoryRulesetPatternRule `json:"commit_message_pattern,omitempty"`
CommitAuthorEmailPattern *RepositoryRulesetPatternRule `json:"commit_author_email_pattern,omitempty"`
CommitterEmailPattern *RepositoryRulesetPatternRule `json:"committer_email_pattern,omitempty"`
BranchNamePattern *RepositoryRulesetPatternRule `json:"branch_name_pattern,omitempty"`
TagNamePattern *RepositoryRulesetPatternRule `json:"tag_name_pattern,omitempty"`
FilePathRestriction *RepositoryRulesetFilePathRestrictionRule `json:"file_path_restriction,omitempty"`
MaxFilePathLength *RepositoryRulesetMaxFilePathLengthRule `json:"max_file_path_length,omitempty"`
FileExtensionRestriction *RepositoryRulesetFileExtensionRestrictionRule `json:"file_extension_restriction,omitempty"`
MaxFileSize *RepositoryRulesetMaxFileSizeRule `json:"max_file_size,omitempty"`
Workflows *RepositoryRulesetWorkflowsRule `json:"workflows,omitempty"`
CodeScanning *RepositoryRulesetCodeScanningRule `json:"code_scanning,omitempty"`
}
// RepositoryRulesetLink represents Links associated with a repository's rulesets. These links are used to provide more information about the ruleset.
type RepositoryRulesetLink struct {
Self *RulesetLink `json:"self,omitempty"`
HTML *RulesetLink `json:"html,omitempty"`
}
// RepositoryRulesetRuleType represents the type of a ruleset rule.
type RepositoryRulesetRuleType struct {
Type string `json:"type"`
}
// RepositoryRulesetUpdateRule defines an update rule for the repository.
type RepositoryRulesetUpdateRule struct {
// Type can be one of: "update".
Type string `json:"type"`
Parameters *UpdateAllowsFetchAndMergeRuleParameters `json:"parameters,omitempty"`
}
// RepositoryRulesetMergeQueueRule defines a merge queue rule for the repository.
type RepositoryRulesetMergeQueueRule struct {
// Type can be one of: "merge_queue".
Type string `json:"type"`
Parameters *MergeQueueRuleParameters `json:"parameters,omitempty"`
}
// RepositoryRulesetRequiredDeploymentsRule defines a rule for required deployments.
type RepositoryRulesetRequiredDeploymentsRule struct {
// Type can be one of: "required_deployments".
Type string `json:"type"`
Parameters *RequiredDeploymentEnvironmentsRuleParameters `json:"parameters,omitempty"`
}
// RepositoryRulesetPullRequestRule defines a rule for pull requests.
type RepositoryRulesetPullRequestRule struct {
// Type can be one of: "pull_request".
Type string `json:"type"`
Parameters *PullRequestRuleParameters `json:"parameters,omitempty"`
}
// RepositoryRulesetRequiredStatusChecksRule defines a rule for required status checks.
type RepositoryRulesetRequiredStatusChecksRule struct {
// Type can be one of: "required_status_checks".
Type string `json:"type"`
Parameters *RequiredStatusChecksRuleParameters `json:"parameters,omitempty"`
}
// RepositoryRulesetPatternRule defines a pattern rule for the repository.
type RepositoryRulesetPatternRule struct {
Type string `json:"type"`
Parameters *RulePatternParameters `json:"parameters,omitempty"`
}
// RepositoryRulesetFilePathRestrictionRule defines a file path restriction rule for the repository.
type RepositoryRulesetFilePathRestrictionRule struct {
// Type can be one of: "file_path_restriction".
Type string `json:"type"`
Parameters *RuleFileParameters `json:"parameters,omitempty"`
}
// RepositoryRulesetMaxFilePathLengthRule defines a maximum file path length rule for the repository.
type RepositoryRulesetMaxFilePathLengthRule struct {
// Type can be one of: "max_file_path_length".
Type string `json:"type"`
Parameters *RuleMaxFilePathLengthParameters `json:"parameters,omitempty"`
}
// RepositoryRulesetFileExtensionRestrictionRule defines a file extension restriction rule for the repository.
type RepositoryRulesetFileExtensionRestrictionRule struct {
// Type can be one of: "file_extension_restriction".
Type string `json:"type"`
Parameters *RuleFileExtensionRestrictionParameters `json:"parameters,omitempty"`
}
// RepositoryRulesetMaxFileSizeRule defines a maximum file size rule for the repository.
type RepositoryRulesetMaxFileSizeRule struct {
// Type can be one of: "max_file_size".
Type string `json:"type"`
Parameters *RuleMaxFileSizeParameters `json:"parameters,omitempty"`
}
// RepositoryRulesetWorkflowsRule defines a workflow rule for the repository.
type RepositoryRulesetWorkflowsRule struct {
// Type can be one of: "workflows".
Type string `json:"type"`
Parameters *RequiredWorkflowsRuleParameters `json:"parameters,omitempty"`
}
// RepositoryRulesetCodeScanningRule defines a code scanning rule for the repository.
type RepositoryRulesetCodeScanningRule struct {
// Type can be one of: "code_scanning".
Type string `json:"type"`
Parameters *RuleCodeScanningParameters `json:"parameters,omitempty"`
}
// RuleCodeScanningParameters defines parameters for code scanning rules.
type RuleCodeScanningParameters struct {
CodeScanningTools []*CodeScanningTool `json:"code_scanning_tools,omitempty"`
}
// CodeScanningTool defines a specific tool used for code scanning.
type CodeScanningTool struct {
AlertsThreshold string `json:"alerts_threshold"`
SecurityAlertsThreshold string `json:"security_alerts_threshold"`
Tool string `json:"tool"`
}
// UnmarshalJSON implements the json.Unmarshaler interface.
// This helps us handle the fact that the RepositoryRule.Parameters field can be of numerous types.
func (r *RepositoryRule) UnmarshalJSON(data []byte) error {
type rule RepositoryRule
var repositoryRule rule
if err := json.Unmarshal(data, &repositoryRule); err != nil {
return err
}
r.RulesetID = repositoryRule.RulesetID
r.RulesetSourceType = repositoryRule.RulesetSourceType
r.RulesetSource = repositoryRule.RulesetSource
r.Type = repositoryRule.Type
switch repositoryRule.Type {
case "creation", "deletion", "non_fast_forward", "required_linear_history", "required_signatures":
r.Parameters = nil
case "update":
if repositoryRule.Parameters == nil {
r.Parameters = nil
return nil
}
params := UpdateAllowsFetchAndMergeRuleParameters{}
if err := json.Unmarshal(*repositoryRule.Parameters, &params); err != nil {
return err
}
bytes, _ := json.Marshal(params)
rawParams := json.RawMessage(bytes)
r.Parameters = &rawParams
case "merge_queue":
if repositoryRule.Parameters == nil {
r.Parameters = nil
return nil
}
params := MergeQueueRuleParameters{}
if err := json.Unmarshal(*repositoryRule.Parameters, &params); err != nil {
return err
}
bytes, _ := json.Marshal(params)
rawParams := json.RawMessage(bytes)
r.Parameters = &rawParams
case "required_deployments":
params := RequiredDeploymentEnvironmentsRuleParameters{}
if err := json.Unmarshal(*repositoryRule.Parameters, &params); err != nil {
return err
}
bytes, _ := json.Marshal(params)
rawParams := json.RawMessage(bytes)
r.Parameters = &rawParams
case "commit_message_pattern", "commit_author_email_pattern", "committer_email_pattern", "branch_name_pattern", "tag_name_pattern":
params := RulePatternParameters{}
if err := json.Unmarshal(*repositoryRule.Parameters, &params); err != nil {
return err
}
bytes, _ := json.Marshal(params)
rawParams := json.RawMessage(bytes)
r.Parameters = &rawParams
case "pull_request":
params := PullRequestRuleParameters{}
if err := json.Unmarshal(*repositoryRule.Parameters, &params); err != nil {
return err
}
bytes, _ := json.Marshal(params)
rawParams := json.RawMessage(bytes)
r.Parameters = &rawParams
case "required_status_checks":
params := RequiredStatusChecksRuleParameters{}
if err := json.Unmarshal(*repositoryRule.Parameters, &params); err != nil {
return err
}
bytes, _ := json.Marshal(params)
rawParams := json.RawMessage(bytes)
r.Parameters = &rawParams
case "workflows":
params := RequiredWorkflowsRuleParameters{}
if err := json.Unmarshal(*repositoryRule.Parameters, &params); err != nil {
return err
}
bytes, _ := json.Marshal(params)
rawParams := json.RawMessage(bytes)
r.Parameters = &rawParams
case "file_path_restriction":
params := RuleFileParameters{}
if err := json.Unmarshal(*repositoryRule.Parameters, &params); err != nil {
return err
}
bytes, _ := json.Marshal(params)
rawParams := json.RawMessage(bytes)
r.Parameters = &rawParams
case "code_scanning":
params := RequiredCodeScanningRuleParameters{}
if err := json.Unmarshal(*repositoryRule.Parameters, &params); err != nil {
return err
}
bytes, _ := json.Marshal(params)
rawParams := json.RawMessage(bytes)
r.Parameters = &rawParams
case "max_file_path_length":
params := RuleMaxFilePathLengthParameters{}
if err := json.Unmarshal(*repositoryRule.Parameters, &params); err != nil {
return err
}
bytes, _ := json.Marshal(params)
rawParams := json.RawMessage(bytes)
r.Parameters = &rawParams
case "file_extension_restriction":
params := RuleFileExtensionRestrictionParameters{}
if err := json.Unmarshal(*repositoryRule.Parameters, &params); err != nil {
return err
}
bytes, _ := json.Marshal(params)
rawParams := json.RawMessage(bytes)
r.Parameters = &rawParams
case "max_file_size":
params := RuleMaxFileSizeParameters{}
if err := json.Unmarshal(*repositoryRule.Parameters, &params); err != nil {
return err
}
bytes, _ := json.Marshal(params)
rawParams := json.RawMessage(bytes)
r.Parameters = &rawParams
default:
r.Type = ""
r.Parameters = nil
return fmt.Errorf("RepositoryRule.Type %q is not yet implemented, unable to unmarshal (%#v)", repositoryRule.Type, repositoryRule)
}
return nil
}
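
To show what the unmarshaler above enables (an illustrative sketch, not upstream code): once a RepositoryRule is decoded, Parameters can be re-decoded into the concrete type selected by Type.

// decodePullRequestParams is a hypothetical helper; it relies only on the
// UnmarshalJSON implementation above and the PullRequestRuleParameters type.
func decodePullRequestParams(data []byte) (*PullRequestRuleParameters, error) {
    var rule RepositoryRule
    if err := json.Unmarshal(data, &rule); err != nil {
        return nil, err
    }
    if rule.Type != "pull_request" || rule.Parameters == nil {
        return nil, nil
    }
    params := &PullRequestRuleParameters{}
    if err := json.Unmarshal(*rule.Parameters, params); err != nil {
        return nil, err
    }
    return params, nil
}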
// NewMergeQueueRule creates a rule to only allow merges via a merge queue.
func NewMergeQueueRule(params *MergeQueueRuleParameters) (rule *RepositoryRule) {
if params != nil {
bytes, _ := json.Marshal(params)
rawParams := json.RawMessage(bytes)
return &RepositoryRule{
Type: "merge_queue",
Parameters: &rawParams,
}
}
return &RepositoryRule{
Type: "merge_queue",
}
}
// NewCreationRule creates a rule to only allow users with bypass permission to create matching refs.
func NewCreationRule() (rule *RepositoryRule) {
return &RepositoryRule{
Type: "creation",
}
}
// NewUpdateRule creates a rule to only allow users with bypass permission to update matching refs.
func NewUpdateRule(params *UpdateAllowsFetchAndMergeRuleParameters) (rule *RepositoryRule) {
if params != nil {
bytes, _ := json.Marshal(params)
rawParams := json.RawMessage(bytes)
return &RepositoryRule{
Type: "update",
Parameters: &rawParams,
}
}
return &RepositoryRule{
Type: "update",
}
}
// NewDeletionRule creates a rule to only allow users with bypass permissions to delete matching refs.
func NewDeletionRule() (rule *RepositoryRule) {
return &RepositoryRule{
Type: "deletion",
}
}
// NewRequiredLinearHistoryRule creates a rule to prevent merge commits from being pushed to matching branches.
func NewRequiredLinearHistoryRule() (rule *RepositoryRule) {
return &RepositoryRule{
Type: "required_linear_history",
}
}
// NewRequiredDeploymentsRule creates a rule to require that the listed environments are successfully deployed before changes can be merged into matching branches.
func NewRequiredDeploymentsRule(params *RequiredDeploymentEnvironmentsRuleParameters) (rule *RepositoryRule) {
bytes, _ := json.Marshal(params)
rawParams := json.RawMessage(bytes)
return &RepositoryRule{
Type: "required_deployments",
Parameters: &rawParams,
}
}
// NewRequiredSignaturesRule creates a rule to require commits pushed to matching branches to have verified signatures.
func NewRequiredSignaturesRule() (rule *RepositoryRule) {
return &RepositoryRule{
Type: "required_signatures",
}
}
// NewPullRequestRule creates a rule to require all commits be made to a non-target branch and submitted via a pull request before they can be merged.
func NewPullRequestRule(params *PullRequestRuleParameters) (rule *RepositoryRule) {
bytes, _ := json.Marshal(params)
rawParams := json.RawMessage(bytes)
return &RepositoryRule{
Type: "pull_request",
Parameters: &rawParams,
}
}
// NewRequiredStatusChecksRule creates a rule to require the specified status checks to pass before branches can be merged into matching branches.
func NewRequiredStatusChecksRule(params *RequiredStatusChecksRuleParameters) (rule *RepositoryRule) {
bytes, _ := json.Marshal(params)
rawParams := json.RawMessage(bytes)
return &RepositoryRule{
Type: "required_status_checks",
Parameters: &rawParams,
}
}
// NewNonFastForwardRule creates a rule to prevent users with push access from force pushing to matching branches.
func NewNonFastForwardRule() (rule *RepositoryRule) {
return &RepositoryRule{
Type: "non_fast_forward",
}
}
// NewCommitMessagePatternRule creates a rule to restrict commit message patterns being pushed to matching branches.
func NewCommitMessagePatternRule(params *RulePatternParameters) (rule *RepositoryRule) {
bytes, _ := json.Marshal(params)
rawParams := json.RawMessage(bytes)
return &RepositoryRule{
Type: "commit_message_pattern",
Parameters: &rawParams,
}
}
// NewCommitAuthorEmailPatternRule creates a rule to restrict commits with author email patterns being merged into matching branches.
func NewCommitAuthorEmailPatternRule(params *RulePatternParameters) (rule *RepositoryRule) {
bytes, _ := json.Marshal(params)
rawParams := json.RawMessage(bytes)
return &RepositoryRule{
Type: "commit_author_email_pattern",
Parameters: &rawParams,
}
}
// NewCommitterEmailPatternRule creates a rule to restrict commits with committer email patterns being merged into matching branches.
func NewCommitterEmailPatternRule(params *RulePatternParameters) (rule *RepositoryRule) {
bytes, _ := json.Marshal(params)
rawParams := json.RawMessage(bytes)
return &RepositoryRule{
Type: "committer_email_pattern",
Parameters: &rawParams,
}
}
// NewBranchNamePatternRule creates a rule to restrict branch patterns from being merged into matching branches.
func NewBranchNamePatternRule(params *RulePatternParameters) (rule *RepositoryRule) {
bytes, _ := json.Marshal(params)
rawParams := json.RawMessage(bytes)
return &RepositoryRule{
Type: "branch_name_pattern",
Parameters: &rawParams,
}
}
// NewTagNamePatternRule creates a rule to restrict tag patterns contained in non-target branches from being merged into matching branches.
func NewTagNamePatternRule(params *RulePatternParameters) (rule *RepositoryRule) {
bytes, _ := json.Marshal(params)
rawParams := json.RawMessage(bytes)
return &RepositoryRule{
Type: "tag_name_pattern",
Parameters: &rawParams,
}
}
// NewRequiredWorkflowsRule creates a rule to require the specified workflows to pass before branches can be merged into matching branches.
func NewRequiredWorkflowsRule(params *RequiredWorkflowsRuleParameters) (rule *RepositoryRule) {
bytes, _ := json.Marshal(params)
rawParams := json.RawMessage(bytes)
return &RepositoryRule{
Type: "workflows",
Parameters: &rawParams,
}
}
// NewRequiredCodeScanningRule creates a rule to require code scanning results from the specified tools before the reference is updated.
func NewRequiredCodeScanningRule(params *RequiredCodeScanningRuleParameters) (rule *RepositoryRule) {
bytes, _ := json.Marshal(params)
rawParams := json.RawMessage(bytes)
return &RepositoryRule{
Type: "code_scanning",
Parameters: &rawParams,
}
}
// NewFilePathRestrictionRule creates a rule to restrict file paths from being pushed to.
func NewFilePathRestrictionRule(params *RuleFileParameters) (rule *RepositoryRule) {
bytes, _ := json.Marshal(params)
rawParams := json.RawMessage(bytes)
return &RepositoryRule{
Type: "file_path_restriction",
Parameters: &rawParams,
}
}
// NewMaxFilePathLengthRule creates a rule to restrict file paths longer than the limit from being pushed.
func NewMaxFilePathLengthRule(params *RuleMaxFilePathLengthParameters) (rule *RepositoryRule) {
bytes, _ := json.Marshal(params)
rawParams := json.RawMessage(bytes)
return &RepositoryRule{
Type: "max_file_path_length",
Parameters: &rawParams,
}
}
// NewFileExtensionRestrictionRule creates a rule to restrict file extensions from being pushed to a commit.
func NewFileExtensionRestrictionRule(params *RuleFileExtensionRestrictionParameters) (rule *RepositoryRule) {
bytes, _ := json.Marshal(params)
rawParams := json.RawMessage(bytes)
return &RepositoryRule{
Type: "file_extension_restriction",
Parameters: &rawParams,
}
}
// NewMaxFileSizeRule creates a rule to restrict file sizes from being pushed to a commit.
func NewMaxFileSizeRule(params *RuleMaxFileSizeParameters) (rule *RepositoryRule) {
bytes, _ := json.Marshal(params)
rawParams := json.RawMessage(bytes)
return &RepositoryRule{
Type: "max_file_size",
Parameters: &rawParams,
}
}
// Ruleset represents a GitHub ruleset object.
type Ruleset struct {
ID *int64 `json:"id,omitempty"`
Name string `json:"name"`
// Possible values for Target are branch, tag, push
Target *string `json:"target,omitempty"`
// Possible values for SourceType are: Repository, Organization
SourceType *string `json:"source_type,omitempty"`
Source string `json:"source"`
// Possible values for Enforcement are: disabled, active, evaluate
Enforcement string `json:"enforcement"`
BypassActors []*BypassActor `json:"bypass_actors,omitempty"`
NodeID *string `json:"node_id,omitempty"`
Links *RulesetLinks `json:"_links,omitempty"`
Conditions *RulesetConditions `json:"conditions,omitempty"`
Rules []*RepositoryRule `json:"rules,omitempty"`
UpdatedAt *Timestamp `json:"updated_at,omitempty"`
CreatedAt *Timestamp `json:"created_at,omitempty"`
}
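
Putting the constructors and this struct together (an illustrative sketch; the ruleset name and thresholds are arbitrary), a branch ruleset might be assembled like this:

// exampleBranchRuleset is illustrative only and not part of the upstream file.
func exampleBranchRuleset() *Ruleset {
    return &Ruleset{
        Name:        "protect-default-branch",
        Target:      String("branch"),
        Enforcement: "active",
        Conditions: &RulesetConditions{
            RefName: &RulesetRefConditionParameters{
                Include: []string{"~DEFAULT_BRANCH"},
                Exclude: []string{},
            },
        },
        Rules: []*RepositoryRule{
            NewRequiredLinearHistoryRule(),
            NewPullRequestRule(&PullRequestRuleParameters{
                DismissStaleReviewsOnPush:      true,
                RequireCodeOwnerReview:         false,
                RequireLastPushApproval:        false,
                RequiredApprovingReviewCount:   1,
                RequiredReviewThreadResolution: true,
            }),
        },
    }
}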
// rulesetNoOmitBypassActors represents a GitHub ruleset object. The struct does not omit BypassActors if the field is nil or an empty array is passed.
type rulesetNoOmitBypassActors struct {
ID *int64 `json:"id,omitempty"`
Name string `json:"name"`
// Possible values for Target are branch, tag
Target *string `json:"target,omitempty"`
// Possible values for SourceType are: Repository, Organization
SourceType *string `json:"source_type,omitempty"`
Source string `json:"source"`
// Possible values for Enforcement are: disabled, active, evaluate
Enforcement string `json:"enforcement"`
BypassActors []*BypassActor `json:"bypass_actors"`
NodeID *string `json:"node_id,omitempty"`
Links *RulesetLinks `json:"_links,omitempty"`
Conditions *RulesetConditions `json:"conditions,omitempty"`
Rules []*RepositoryRule `json:"rules,omitempty"`
}
// GetRulesForBranch gets all the rules that apply to the specified branch.
//
// GitHub API docs: https://docs.github.com/rest/repos/rules#get-rules-for-a-branch
//
//meta:operation GET /repos/{owner}/{repo}/rules/branches/{branch}
func (s *RepositoriesService) GetRulesForBranch(ctx context.Context, owner, repo, branch string) ([]*RepositoryRule, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/rules/branches/%v", owner, repo, branch)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
var rules []*RepositoryRule
resp, err := s.client.Do(ctx, req, &rules)
if err != nil {
return nil, resp, err
}
return rules, resp, nil
}
// GetAllRulesets gets all the rulesets that apply to the specified repository.
// If includesParents is true, rulesets configured at the organization level that apply to the repository will be returned.
//
// GitHub API docs: https://docs.github.com/rest/repos/rules#get-all-repository-rulesets
//
//meta:operation GET /repos/{owner}/{repo}/rulesets
func (s *RepositoriesService) GetAllRulesets(ctx context.Context, owner, repo string, includesParents bool) ([]*Ruleset, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/rulesets?includes_parents=%v", owner, repo, includesParents)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
var ruleset []*Ruleset
resp, err := s.client.Do(ctx, req, &ruleset)
if err != nil {
return nil, resp, err
}
return ruleset, resp, nil
}
// CreateRuleset creates a ruleset for the specified repository.
//
// GitHub API docs: https://docs.github.com/rest/repos/rules#create-a-repository-ruleset
//
//meta:operation POST /repos/{owner}/{repo}/rulesets
func (s *RepositoriesService) CreateRuleset(ctx context.Context, owner, repo string, rs *Ruleset) (*Ruleset, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/rulesets", owner, repo)
req, err := s.client.NewRequest("POST", u, rs)
if err != nil {
return nil, nil, err
}
var ruleset *Ruleset
resp, err := s.client.Do(ctx, req, &ruleset)
if err != nil {
return nil, resp, err
}
return ruleset, resp, nil
}
// GetRuleset gets a ruleset for the specified repository.
// If includesParents is true, rulesets configured at the organization level that apply to the repository will be returned.
//
// GitHub API docs: https://docs.github.com/rest/repos/rules#get-a-repository-ruleset
//
//meta:operation GET /repos/{owner}/{repo}/rulesets/{ruleset_id}
func (s *RepositoriesService) GetRuleset(ctx context.Context, owner, repo string, rulesetID int64, includesParents bool) (*Ruleset, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/rulesets/%v?includes_parents=%v", owner, repo, rulesetID, includesParents)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
var ruleset *Ruleset
resp, err := s.client.Do(ctx, req, &ruleset)
if err != nil {
return nil, resp, err
}
return ruleset, resp, nil
}
// UpdateRuleset updates a ruleset for the specified repository.
//
// GitHub API docs: https://docs.github.com/rest/repos/rules#update-a-repository-ruleset
//
//meta:operation PUT /repos/{owner}/{repo}/rulesets/{ruleset_id}
func (s *RepositoriesService) UpdateRuleset(ctx context.Context, owner, repo string, rulesetID int64, rs *Ruleset) (*Ruleset, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/rulesets/%v", owner, repo, rulesetID)
req, err := s.client.NewRequest("PUT", u, rs)
if err != nil {
return nil, nil, err
}
var ruleset *Ruleset
resp, err := s.client.Do(ctx, req, &ruleset)
if err != nil {
return nil, resp, err
}
return ruleset, resp, nil
}
// UpdateRulesetNoBypassActor updates a ruleset for the specified repository.
//
// This function is necessary as the UpdateRuleset function does not marshal BypassActors if passed as nil or an empty array.
//
// GitHub API docs: https://docs.github.com/rest/repos/rules#update-a-repository-ruleset
//
//meta:operation PUT /repos/{owner}/{repo}/rulesets/{ruleset_id}
func (s *RepositoriesService) UpdateRulesetNoBypassActor(ctx context.Context, owner, repo string, rulesetID int64, rs *Ruleset) (*Ruleset, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/rulesets/%v", owner, repo, rulesetID)
rsNoBypassActor := &rulesetNoOmitBypassActors{}
if rs != nil {
rsNoBypassActor = &rulesetNoOmitBypassActors{
ID: rs.ID,
Name: rs.Name,
Target: rs.Target,
SourceType: rs.SourceType,
Source: rs.Source,
Enforcement: rs.Enforcement,
BypassActors: rs.BypassActors,
NodeID: rs.NodeID,
Links: rs.Links,
Conditions: rs.Conditions,
Rules: rs.Rules,
}
}
req, err := s.client.NewRequest("PUT", u, rsNoBypassActor)
if err != nil {
return nil, nil, err
}
var ruleSet *Ruleset
resp, err := s.client.Do(ctx, req, &ruleSet)
if err != nil {
return nil, resp, err
}
return ruleSet, resp, nil
}
// DeleteRuleset deletes a ruleset for the specified repository.
//
// GitHub API docs: https://docs.github.com/rest/repos/rules#delete-a-repository-ruleset
//
//meta:operation DELETE /repos/{owner}/{repo}/rulesets/{ruleset_id}
func (s *RepositoriesService) DeleteRuleset(ctx context.Context, owner, repo string, rulesetID int64) (*Response, error) {
u := fmt.Sprintf("repos/%v/%v/rulesets/%v", owner, repo, rulesetID)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
return s.client.Do(ctx, req, nil)
}
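
As a usage sketch (not part of the original file), creating a repository ruleset and then clearing its bypass actors illustrates why UpdateRulesetNoBypassActor exists: UpdateRuleset would drop an empty bypass_actors field due to omitempty, while the variant below sends it explicitly. The token, owner, and repository names are placeholders, and the import path assumes the pre-update module version shown in this listing (v68).

package main

import (
    "context"
    "log"

    "github.com/google/go-github/v68/github"
)

func main() {
    ctx := context.Background()
    client := github.NewClient(nil).WithAuthToken("ghp_example") // hypothetical token

    rs := &github.Ruleset{
        Name:        "protect-default-branch",
        Target:      github.String("branch"),
        Enforcement: "active",
    }
    created, _, err := client.Repositories.CreateRuleset(ctx, "my-org", "my-repo", rs)
    if err != nil {
        log.Fatal(err)
    }

    // Clear bypass actors: UpdateRuleset would omit an empty bypass_actors field,
    // so the NoBypassActor variant is used to send it explicitly.
    created.BypassActors = []*github.BypassActor{}
    if _, _, err := client.Repositories.UpdateRulesetNoBypassActor(ctx, "my-org", "my-repo", created.GetID(), created); err != nil {
        log.Fatal(err)
    }
}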


@ -142,6 +142,14 @@ func (s *ActionsService) GetArtifact(ctx context.Context, owner, repo string, ar
func (s *ActionsService) DownloadArtifact(ctx context.Context, owner, repo string, artifactID int64, maxRedirects int) (*url.URL, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/actions/artifacts/%v/zip", owner, repo, artifactID)
if s.client.RateLimitRedirectionalEndpoints {
return s.downloadArtifactWithRateLimit(ctx, u, maxRedirects)
}
return s.downloadArtifactWithoutRateLimit(ctx, u, maxRedirects)
}
func (s *ActionsService) downloadArtifactWithoutRateLimit(ctx context.Context, u string, maxRedirects int) (*url.URL, *Response, error) {
resp, err := s.client.roundTripWithOptionalFollowRedirect(ctx, u, maxRedirects)
if err != nil {
return nil, nil, err
@ -149,7 +157,7 @@ func (s *ActionsService) DownloadArtifact(ctx context.Context, owner, repo strin
defer resp.Body.Close()
if resp.StatusCode != http.StatusFound {
return nil, newResponse(resp), fmt.Errorf("unexpected status code: %s", resp.Status)
return nil, newResponse(resp), fmt.Errorf("unexpected status code: %v", resp.Status)
}
parsedURL, err := url.Parse(resp.Header.Get("Location"))
@ -160,6 +168,26 @@ func (s *ActionsService) DownloadArtifact(ctx context.Context, owner, repo strin
return parsedURL, newResponse(resp), nil
}
func (s *ActionsService) downloadArtifactWithRateLimit(ctx context.Context, u string, maxRedirects int) (*url.URL, *Response, error) {
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
url, resp, err := s.client.bareDoUntilFound(ctx, req, maxRedirects)
if err != nil {
return nil, resp, err
}
defer resp.Body.Close()
// If we didn't receive a valid Location in a 302 response
if url == nil {
return nil, resp, fmt.Errorf("unexpected status code: %v", resp.Status)
}
return url, resp, nil
}
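
A sketch of how the new rate-limited redirect handling is used from calling code (illustrative: the token, owner, repository, and artifact ID are placeholders, and the exported RateLimitRedirectionalEndpoints client field is assumed to be settable directly, as the code above implies). The same pattern applies to the workflow job log and workflow run log helpers added later in this commit.

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/google/go-github/v69/github"
)

func main() {
    ctx := context.Background()
    client := github.NewClient(nil).WithAuthToken("ghp_example")
    // Opt in to counting redirect-following requests against the rate limiter.
    client.RateLimitRedirectionalEndpoints = true

    // Follow up to 3 redirects before giving up.
    u, _, err := client.Actions.DownloadArtifact(ctx, "my-org", "my-repo", 12345, 3)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("artifact download URL:", u)
}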
// DeleteArtifact deletes a workflow run artifact.
//
// GitHub API docs: https://docs.github.com/rest/actions/artifacts#delete-an-artifact


@ -150,6 +150,14 @@ func (s *ActionsService) GetWorkflowJobByID(ctx context.Context, owner, repo str
func (s *ActionsService) GetWorkflowJobLogs(ctx context.Context, owner, repo string, jobID int64, maxRedirects int) (*url.URL, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/actions/jobs/%v/logs", owner, repo, jobID)
if s.client.RateLimitRedirectionalEndpoints {
return s.getWorkflowJobLogsWithRateLimit(ctx, u, maxRedirects)
}
return s.getWorkflowJobLogsWithoutRateLimit(ctx, u, maxRedirects)
}
func (s *ActionsService) getWorkflowJobLogsWithoutRateLimit(ctx context.Context, u string, maxRedirects int) (*url.URL, *Response, error) {
resp, err := s.client.roundTripWithOptionalFollowRedirect(ctx, u, maxRedirects)
if err != nil {
return nil, nil, err
@ -157,9 +165,29 @@ func (s *ActionsService) GetWorkflowJobLogs(ctx context.Context, owner, repo str
defer resp.Body.Close()
if resp.StatusCode != http.StatusFound {
return nil, newResponse(resp), fmt.Errorf("unexpected status code: %s", resp.Status)
return nil, newResponse(resp), fmt.Errorf("unexpected status code: %v", resp.Status)
}
parsedURL, err := url.Parse(resp.Header.Get("Location"))
return parsedURL, newResponse(resp), err
}
func (s *ActionsService) getWorkflowJobLogsWithRateLimit(ctx context.Context, u string, maxRedirects int) (*url.URL, *Response, error) {
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
url, resp, err := s.client.bareDoUntilFound(ctx, req, maxRedirects)
if err != nil {
return nil, resp, err
}
defer resp.Body.Close()
// If we didn't receive a valid Location in a 302 response
if url == nil {
return nil, resp, fmt.Errorf("unexpected status code: %v", resp.Status)
}
return url, resp, nil
}


@ -204,6 +204,7 @@ func (s *ActionsService) ListRepositoryWorkflowRuns(ctx context.Context, owner,
}
// GetWorkflowRunByID gets a specific workflow run by ID.
// You can use the helper function *DeploymentProtectionRuleEvent.GetRunID() to easily retrieve the workflow run ID from a DeploymentProtectionRuleEvent.
//
// GitHub API docs: https://docs.github.com/rest/actions/workflow-runs#get-a-workflow-run
//
@ -226,6 +227,7 @@ func (s *ActionsService) GetWorkflowRunByID(ctx context.Context, owner, repo str
}
// GetWorkflowRunAttempt gets a specific workflow run attempt.
// You can use the helper function *DeploymentProtectionRuleEvent.GetRunID() to easily retrieve the workflow run ID from a DeploymentProtectionRuleEvent.
//
// GitHub API docs: https://docs.github.com/rest/actions/workflow-runs#get-a-workflow-run-attempt
//
@ -252,6 +254,7 @@ func (s *ActionsService) GetWorkflowRunAttempt(ctx context.Context, owner, repo
}
// GetWorkflowRunAttemptLogs gets a redirect URL to download a plain text file of logs for a workflow run for attempt number.
// You can use the helper function *DeploymentProtectionRuleEvent.GetRunID() to easily retrieve a workflow run ID from the DeploymentProtectionRuleEvent.
//
// GitHub API docs: https://docs.github.com/rest/actions/workflow-runs#download-workflow-run-attempt-logs
//
@ -259,6 +262,14 @@ func (s *ActionsService) GetWorkflowRunAttempt(ctx context.Context, owner, repo
func (s *ActionsService) GetWorkflowRunAttemptLogs(ctx context.Context, owner, repo string, runID int64, attemptNumber int, maxRedirects int) (*url.URL, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/attempts/%v/logs", owner, repo, runID, attemptNumber)
if s.client.RateLimitRedirectionalEndpoints {
return s.getWorkflowRunAttemptLogsWithRateLimit(ctx, u, maxRedirects)
}
return s.getWorkflowRunAttemptLogsWithoutRateLimit(ctx, u, maxRedirects)
}
func (s *ActionsService) getWorkflowRunAttemptLogsWithoutRateLimit(ctx context.Context, u string, maxRedirects int) (*url.URL, *Response, error) {
resp, err := s.client.roundTripWithOptionalFollowRedirect(ctx, u, maxRedirects)
if err != nil {
return nil, nil, err
@ -266,14 +277,35 @@ func (s *ActionsService) GetWorkflowRunAttemptLogs(ctx context.Context, owner, r
defer resp.Body.Close()
if resp.StatusCode != http.StatusFound {
return nil, newResponse(resp), fmt.Errorf("unexpected status code: %s", resp.Status)
return nil, newResponse(resp), fmt.Errorf("unexpected status code: %v", resp.Status)
}
parsedURL, err := url.Parse(resp.Header.Get("Location"))
return parsedURL, newResponse(resp), err
}
func (s *ActionsService) getWorkflowRunAttemptLogsWithRateLimit(ctx context.Context, u string, maxRedirects int) (*url.URL, *Response, error) {
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
url, resp, err := s.client.bareDoUntilFound(ctx, req, maxRedirects)
if err != nil {
return nil, resp, err
}
defer resp.Body.Close()
// If we didn't receive a valid Location in a 302 response
if url == nil {
return nil, resp, fmt.Errorf("unexpected status code: %v", resp.Status)
}
return url, resp, nil
}
// RerunWorkflowByID re-runs a workflow by ID.
// You can use the helper function *DeploymentProtectionRuleEvent.GetRunID() to easily retrieve the workflow run ID from a DeploymentProtectionRuleEvent.
//
// GitHub API docs: https://docs.github.com/rest/actions/workflow-runs#re-run-a-workflow
//
@ -290,6 +322,7 @@ func (s *ActionsService) RerunWorkflowByID(ctx context.Context, owner, repo stri
}
// RerunFailedJobsByID re-runs all of the failed jobs and their dependent jobs in a workflow run by ID.
// You can use the helper function *DeploymentProtectionRuleEvent.GetRunID() to easily retrieve the workflow run ID from a DeploymentProtectionRuleEvent.
//
// GitHub API docs: https://docs.github.com/rest/actions/workflow-runs#re-run-failed-jobs-from-a-workflow-run
//
@ -307,6 +340,8 @@ func (s *ActionsService) RerunFailedJobsByID(ctx context.Context, owner, repo st
// RerunJobByID re-runs a job and its dependent jobs in a workflow run by ID.
//
// You can use the helper function *DeploymentProtectionRuleEvent.GetRunID() to easily retrieve the workflow run ID from a DeploymentProtectionRuleEvent.
//
// GitHub API docs: https://docs.github.com/rest/actions/workflow-runs#re-run-a-job-from-a-workflow-run
//
//meta:operation POST /repos/{owner}/{repo}/actions/jobs/{job_id}/rerun
@ -322,6 +357,7 @@ func (s *ActionsService) RerunJobByID(ctx context.Context, owner, repo string, j
}
// CancelWorkflowRunByID cancels a workflow run by ID.
// You can use the helper function *DeploymentProtectionRuleEvent.GetRunID() to easily retrieve the workflow run ID from a DeploymentProtectionRuleEvent.
//
// GitHub API docs: https://docs.github.com/rest/actions/workflow-runs#cancel-a-workflow-run
//
@ -338,6 +374,7 @@ func (s *ActionsService) CancelWorkflowRunByID(ctx context.Context, owner, repo
}
// GetWorkflowRunLogs gets a redirect URL to download a plain text file of logs for a workflow run.
// You can use the helper function *DeploymentProtectionRuleEvent.GetRunID() to easily retrieve the workflow run ID from a DeploymentProtectionRuleEvent.
//
// GitHub API docs: https://docs.github.com/rest/actions/workflow-runs#download-workflow-run-logs
//
@ -345,6 +382,14 @@ func (s *ActionsService) CancelWorkflowRunByID(ctx context.Context, owner, repo
func (s *ActionsService) GetWorkflowRunLogs(ctx context.Context, owner, repo string, runID int64, maxRedirects int) (*url.URL, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/logs", owner, repo, runID)
if s.client.RateLimitRedirectionalEndpoints {
return s.getWorkflowRunLogsWithRateLimit(ctx, u, maxRedirects)
}
return s.getWorkflowRunLogsWithoutRateLimit(ctx, u, maxRedirects)
}
func (s *ActionsService) getWorkflowRunLogsWithoutRateLimit(ctx context.Context, u string, maxRedirects int) (*url.URL, *Response, error) {
resp, err := s.client.roundTripWithOptionalFollowRedirect(ctx, u, maxRedirects)
if err != nil {
return nil, nil, err
@ -359,7 +404,28 @@ func (s *ActionsService) GetWorkflowRunLogs(ctx context.Context, owner, repo str
return parsedURL, newResponse(resp), err
}
func (s *ActionsService) getWorkflowRunLogsWithRateLimit(ctx context.Context, u string, maxRedirects int) (*url.URL, *Response, error) {
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
url, resp, err := s.client.bareDoUntilFound(ctx, req, maxRedirects)
if err != nil {
return nil, resp, err
}
defer resp.Body.Close()
// If we didn't receive a valid Location in a 302 response
if url == nil {
return nil, resp, fmt.Errorf("unexpected status code: %v", resp.Status)
}
return url, resp, nil
}
// DeleteWorkflowRun deletes a workflow run by ID.
// You can use the helper function *DeploymentProtectionRuleEvent.GetRunID() to easily retrieve the workflow run ID from a DeploymentProtectionRuleEvent.
//
// GitHub API docs: https://docs.github.com/rest/actions/workflow-runs#delete-a-workflow-run
//
@ -376,6 +442,7 @@ func (s *ActionsService) DeleteWorkflowRun(ctx context.Context, owner, repo stri
}
// DeleteWorkflowRunLogs deletes all logs for a workflow run.
// You can use the helper function *DeploymentProtectionRuleEvent.GetRunID() to easily retrieve the workflow run ID from a DeploymentProtectionRuleEvent.
//
// GitHub API docs: https://docs.github.com/rest/actions/workflow-runs#delete-workflow-run-logs
//
@ -392,6 +459,7 @@ func (s *ActionsService) DeleteWorkflowRunLogs(ctx context.Context, owner, repo
}
// GetWorkflowRunUsageByID gets the usage of a specific workflow run by run ID, in units of billable milliseconds.
// You can use the helper function *DeploymentProtectionRuleEvent.GetRunID() to easily retrieve the workflow run ID from a DeploymentProtectionRuleEvent.
//
// GitHub API docs: https://docs.github.com/rest/actions/workflow-runs#get-workflow-run-usage
//
@ -414,6 +482,7 @@ func (s *ActionsService) GetWorkflowRunUsageByID(ctx context.Context, owner, rep
}
// GetPendingDeployments gets all deployment environments for a workflow run that are waiting for protection rules to pass.
// You can use the helper function *DeploymentProtectionRuleEvent.GetRunID() to easily retrieve the workflow run ID from a DeploymentProtectionRuleEvent.
//
// GitHub API docs: https://docs.github.com/rest/actions/workflow-runs#get-pending-deployments-for-a-workflow-run
//
@ -436,6 +505,7 @@ func (s *ActionsService) GetPendingDeployments(ctx context.Context, owner, repo
}
// PendingDeployments approves or rejects pending deployments that are waiting on approval by a required reviewer.
// You can use the helper function *DeploymentProtectionRuleEvent.GetRunID() to easily retrieve the workflow run ID from a DeploymentProtectionRuleEvent.
//
// GitHub API docs: https://docs.github.com/rest/actions/workflow-runs#review-pending-deployments-for-a-workflow-run
//
@ -458,6 +528,7 @@ func (s *ActionsService) PendingDeployments(ctx context.Context, owner, repo str
}
// ReviewCustomDeploymentProtectionRule approves or rejects custom deployment protection rules provided by a GitHub App for a workflow run.
// You can use the helper function *DeploymentProtectionRuleEvent.GetRunID() to easily retrieve the workflow run ID from a DeploymentProtectionRuleEvent.
//
// GitHub API docs: https://docs.github.com/rest/actions/workflow-runs#review-custom-deployment-protection-rules-for-a-workflow-run
//


@ -118,13 +118,13 @@ func (s GistStats) String() string {
return Stringify(s)
}
// PullStats represents the number of total, merged, mergable and unmergeable
// PullStats represents the number of total, merged, mergeable and unmergeable
// pull-requests.
type PullStats struct {
TotalPulls *int `json:"total_pulls,omitempty"`
MergedPulls *int `json:"merged_pulls,omitempty"`
MergablePulls *int `json:"mergeable_pulls,omitempty"`
UnmergablePulls *int `json:"unmergeable_pulls,omitempty"`
TotalPulls *int `json:"total_pulls,omitempty"`
MergedPulls *int `json:"merged_pulls,omitempty"`
MergeablePulls *int `json:"mergeable_pulls,omitempty"`
UnmergeablePulls *int `json:"unmergeable_pulls,omitempty"`
}
func (s PullStats) String() string {


@ -87,8 +87,8 @@ type CheckSuite struct {
// The following fields are only populated by Webhook events.
HeadCommit *Commit `json:"head_commit,omitempty"`
LatestCheckRunsCount *int64 `json:"latest_check_runs_count,omitempty"`
Rerequstable *bool `json:"rerequestable,omitempty"`
RunsRerequstable *bool `json:"runs_rerequestable,omitempty"`
Rerequestable *bool `json:"rerequestable,omitempty"`
RunsRerequestable *bool `json:"runs_rerequestable,omitempty"`
}
func (c CheckRun) String() string {


@ -141,6 +141,15 @@ type AlertListOptions struct {
// The name of a code scanning tool. Only results by this tool will be listed.
ToolName string `url:"tool_name,omitempty"`
// The GUID of a code scanning tool. Only results by this tool will be listed.
ToolGUID string `url:"tool_guid,omitempty"`
// The direction to sort the results by. Possible values are: asc, desc. Default: desc.
Direction string `url:"direction,omitempty"`
// The property by which to sort the results. Possible values are: created, updated. Default: created.
Sort string `url:"sort,omitempty"`
ListCursorOptions
// Add ListOptions so offset pagination with integer type "page" query parameter is accepted
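
To illustrate the new Sort and Direction options (a sketch, not part of the change; it assumes the long-standing CodeScanningService.ListAlertsForRepo method and placeholder token, owner, and repository values):

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/google/go-github/v69/github"
)

func main() {
    ctx := context.Background()
    client := github.NewClient(nil).WithAuthToken("ghp_example")

    opts := &github.AlertListOptions{
        Sort:      "updated", // new field: created (default) or updated
        Direction: "asc",     // new field: asc or desc (default)
    }
    alerts, _, err := client.CodeScanning.ListAlertsForRepo(ctx, "my-org", "my-repo", opts)
    if err != nil {
        log.Fatal(err)
    }
    for _, a := range alerts {
        fmt.Println(a.GetNumber(), a.GetState())
    }
}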
@ -391,7 +400,7 @@ func (s *CodeScanningService) UploadSarif(ctx context.Context, owner, repo strin
return nil, nil, err
}
// This will always return an error without unmarshalling the data
// This will always return an error without unmarshaling the data
resp, err := s.client.Do(ctx, req, nil)
// Even though there was an error, we still return the response
// in case the caller wants to inspect it further.


@ -307,7 +307,7 @@ func (s *CopilotService) ListCopilotSeats(ctx context.Context, org string, opts
//
// To paginate through all seats, populate 'Page' with the number of the last page.
//
// GitHub API docs: https://docs.github.com/rest/copilot/copilot-user-management#list-all-copilot-seat-assignments-for-an-enterprise
// GitHub API docs: https://docs.github.com/enterprise-cloud@latest/rest/copilot/copilot-user-management#list-all-copilot-seat-assignments-for-an-enterprise
//
//meta:operation GET /enterprises/{enterprise}/copilot/billing/seats
func (s *CopilotService) ListCopilotEnterpriseSeats(ctx context.Context, enterprise string, opts *ListOptions) (*ListCopilotSeatsResponse, *Response, error) {
@ -467,7 +467,7 @@ func (s *CopilotService) GetSeatDetails(ctx context.Context, org, user string) (
// GetEnterpriseMetrics gets Copilot usage metrics for an enterprise.
//
// GitHub API docs: https://docs.github.com/rest/copilot/copilot-metrics#get-copilot-metrics-for-an-enterprise
// GitHub API docs: https://docs.github.com/enterprise-cloud@latest/rest/copilot/copilot-metrics#get-copilot-metrics-for-an-enterprise
//
//meta:operation GET /enterprises/{enterprise}/copilot/metrics
func (s *CopilotService) GetEnterpriseMetrics(ctx context.Context, enterprise string, opts *CopilotMetricsListOptions) ([]*CopilotMetrics, *Response, error) {
@ -493,7 +493,7 @@ func (s *CopilotService) GetEnterpriseMetrics(ctx context.Context, enterprise st
// GetEnterpriseTeamMetrics gets Copilot usage metrics for an enterprise team.
//
// GitHub API docs: https://docs.github.com/rest/copilot/copilot-metrics#get-copilot-metrics-for-an-enterprise-team
// GitHub API docs: https://docs.github.com/enterprise-cloud@latest/rest/copilot/copilot-metrics#get-copilot-metrics-for-an-enterprise-team
//
//meta:operation GET /enterprises/{enterprise}/team/{team_slug}/copilot/metrics
func (s *CopilotService) GetEnterpriseTeamMetrics(ctx context.Context, enterprise, team string, opts *CopilotMetricsListOptions) ([]*CopilotMetrics, *Response, error) {
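
For illustration (not part of the change), the enterprise metrics method whose documentation link was updated above can be called like this; the token and enterprise slug are placeholders and the options are left nil:

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/google/go-github/v69/github"
)

func main() {
    ctx := context.Background()
    client := github.NewClient(nil).WithAuthToken("ghp_example")

    metrics, _, err := client.Copilot.GetEnterpriseMetrics(ctx, "my-enterprise", nil)
    if err != nil {
        log.Fatal(err)
    }
    for _, m := range metrics {
        fmt.Printf("%+v\n", m)
    }
}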


@ -8,7 +8,7 @@ Package github provides a client for using the GitHub API.
Usage:
import "github.com/google/go-github/v68/github" // with go modules enabled (GO111MODULE=on or outside GOPATH)
import "github.com/google/go-github/v69/github" // with go modules enabled (GO111MODULE=on or outside GOPATH)
import "github.com/google/go-github/github" // with go modules disabled
Construct a new GitHub client, then use the various services on the client to
@ -138,11 +138,17 @@ To detect this condition of error, you can check if its type is
# Conditional Requests
The GitHub API has good support for conditional requests which will help
prevent you from burning through your rate limit, as well as help speed up your
application. go-github does not handle conditional requests directly, but is
instead designed to work with a caching http.Transport. We recommend using
https://github.com/gregjones/httpcache for that.
The GitHub REST API has good support for conditional HTTP requests
via the ETag header which will help prevent you from burning through your
rate limit, as well as help speed up your application. go-github does not
handle conditional requests directly, but is instead designed to work with a
caching http.Transport.
Typically, an RFC 7234 compliant HTTP cache such as https://github.com/gregjones/httpcache
is recommended. Alternatively, the https://github.com/bored-engineer/github-conditional-http-transport
package relies on (undocumented) GitHub specific cache logic and is
recommended when making requests using short-lived credentials such as a
GitHub App installation token.
Learn more about GitHub conditional requests at
https://docs.github.com/rest/overview/resources-in-the-rest-api#conditional-requests.
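As a minimal sketch of the recommended setup, an RFC 7234 cache can be wired in front of the client roughly like this; the in-memory cache and the Zen call are only illustrative choices.
package main

import (
	"context"
	"fmt"

	"github.com/google/go-github/v69/github"
	"github.com/gregjones/httpcache"
)

func main() {
	// An in-memory HTTP cache; responses carrying ETags are revalidated
	// with conditional requests on subsequent calls.
	tp := httpcache.NewMemoryCacheTransport()
	client := github.NewClient(tp.Client())

	// Repeated identical calls can now be answered via 304 Not Modified,
	// which does not count against the primary rate limit.
	zen, _, err := client.Zen(context.Background())
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(zen)
}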

View file

@ -0,0 +1,163 @@
// Copyright 2025 The go-github AUTHORS. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package github
import (
"context"
)
// NodeQueryOptions specifies the optional parameters to the EnterpriseService
// Node management APIs.
type NodeQueryOptions struct {
// UUID filters results based on the node UUID.
UUID *string `url:"uuid,omitempty"`
// ClusterRoles filters the cluster roles from the cluster configuration file.
ClusterRoles *string `url:"cluster_roles,omitempty"`
}
// ClusterStatus represents a response from the ClusterStatus and ReplicationStatus methods.
type ClusterStatus struct {
Status *string `json:"status,omitempty"`
Nodes []*ClusterStatusNode `json:"nodes"`
}
// ClusterStatusNode represents the status of a cluster node.
type ClusterStatusNode struct {
Hostname *string `json:"hostname,omitempty"`
Status *string `json:"status,omitempty"`
Services []*ClusterStatusNodeServiceItem `json:"services"`
}
// ClusterStatusNodeServiceItem represents the status of a service running on a cluster node.
type ClusterStatusNodeServiceItem struct {
Status *string `json:"status,omitempty"`
Name *string `json:"name,omitempty"`
Details *string `json:"details,omitempty"`
}
// SystemRequirements represents a response from the CheckSystemRequirements method.
type SystemRequirements struct {
Status *string `json:"status,omitempty"`
Nodes []*SystemRequirementsNode `json:"nodes"`
}
// SystemRequirementsNode represents the status of a system node.
type SystemRequirementsNode struct {
Hostname *string `json:"hostname,omitempty"`
Status *string `json:"status,omitempty"`
RolesStatus []*SystemRequirementsNodeRoleStatus `json:"roles_status"`
}
// SystemRequirementsNodeRoleStatus represents the status of a role on a system node.
type SystemRequirementsNodeRoleStatus struct {
Status *string `json:"status,omitempty"`
Role *string `json:"role,omitempty"`
}
// NodeReleaseVersion represents a response from the GetNodeReleaseVersions method.
type NodeReleaseVersion struct {
Hostname *string `json:"hostname,omitempty"`
Version *ReleaseVersion `json:"version"`
}
// ReleaseVersion holds the release version information of the node.
type ReleaseVersion struct {
Version *string `json:"version,omitempty"`
Platform *string `json:"platform,omitempty"`
BuildID *string `json:"build_id,omitempty"`
BuildDate *string `json:"build_date,omitempty"`
}
// CheckSystemRequirements checks if GHES system nodes meet the system requirements.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#get-the-system-requirement-check-results-for-configured-cluster-nodes
//
//meta:operation GET /manage/v1/checks/system-requirements
func (s *EnterpriseService) CheckSystemRequirements(ctx context.Context) (*SystemRequirements, *Response, error) {
u := "manage/v1/checks/system-requirements"
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
systemRequirements := new(SystemRequirements)
resp, err := s.client.Do(ctx, req, systemRequirements)
if err != nil {
return nil, resp, err
}
return systemRequirements, resp, nil
}
// ClusterStatus gets the status of all services running on each cluster node.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#get-the-status-of-services-running-on-all-cluster-nodes
//
//meta:operation GET /manage/v1/cluster/status
func (s *EnterpriseService) ClusterStatus(ctx context.Context) (*ClusterStatus, *Response, error) {
u := "manage/v1/cluster/status"
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
clusterStatus := new(ClusterStatus)
resp, err := s.client.Do(ctx, req, clusterStatus)
if err != nil {
return nil, resp, err
}
return clusterStatus, resp, nil
}
// ReplicationStatus gets the status of all services running on each replica node.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#get-the-status-of-services-running-on-all-replica-nodes
//
//meta:operation GET /manage/v1/replication/status
func (s *EnterpriseService) ReplicationStatus(ctx context.Context, opts *NodeQueryOptions) (*ClusterStatus, *Response, error) {
u, err := addOptions("manage/v1/replication/status", opts)
if err != nil {
return nil, nil, err
}
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
status := new(ClusterStatus)
resp, err := s.client.Do(ctx, req, status)
if err != nil {
return nil, resp, err
}
return status, resp, nil
}
// GetNodeReleaseVersions gets the version information deployed to each node.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#get-all-ghes-release-versions-for-all-nodes
//
//meta:operation GET /manage/v1/version
func (s *EnterpriseService) GetNodeReleaseVersions(ctx context.Context, opts *NodeQueryOptions) ([]*NodeReleaseVersion, *Response, error) {
u, err := addOptions("manage/v1/version", opts)
if err != nil {
return nil, nil, err
}
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
var releaseVersions []*NodeReleaseVersion
resp, err := s.client.Do(ctx, req, &releaseVersions)
if err != nil {
return nil, resp, err
}
return releaseVersions, resp, nil
}
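A minimal usage sketch for the new Manage API wrappers above, assuming the usual generated accessors; the appliance hostname is a placeholder and authentication setup is omitted.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v69/github"
)

func main() {
	ctx := context.Background()
	// Point the client at the GHES appliance (placeholder hostname).
	client, err := github.NewClient(nil).WithEnterpriseURLs(
		"https://ghes.example.com/api/v3/",
		"https://ghes.example.com/api/uploads/")
	if err != nil {
		log.Fatal(err)
	}

	// Overall health of every cluster node and its services.
	status, _, err := client.Enterprise.ClusterStatus(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("cluster status:", status.GetStatus())

	// Release version deployed on each node.
	versions, _, err := client.Enterprise.GetNodeReleaseVersions(ctx, nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, v := range versions {
		fmt.Println(v.GetHostname(), v.GetVersion().GetVersion())
	}
}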

View file

@ -0,0 +1,516 @@
// Copyright 2025 The go-github AUTHORS. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package github
import (
"context"
"errors"
)
// ConfigApplyOptions is a struct to hold the options for the ConfigApply API and the response.
type ConfigApplyOptions struct {
// RunID is the ID of the run to get the status of. If empty, a random one will be generated.
RunID *string `json:"run_id,omitempty"`
}
// ConfigApplyStatus is a struct to hold the response from the ConfigApply API.
type ConfigApplyStatus struct {
Running *bool `json:"running,omitempty"`
Successful *bool `json:"successful,omitempty"`
Nodes []*ConfigApplyStatusNode `json:"nodes"`
}
// ConfigApplyStatusNode is a struct to hold the response from the ConfigApply API.
type ConfigApplyStatusNode struct {
Hostname *string `json:"hostname,omitempty"`
Running *bool `json:"running,omitempty"`
Successful *bool `json:"successful,omitempty"`
RunID *string `json:"run_id,omitempty"`
}
// ConfigApplyEventsOptions is used to enable pagination.
type ConfigApplyEventsOptions struct {
LastRequestID *string `url:"last_request_id,omitempty"`
}
// ConfigApplyEvents is a struct to hold the response from the ConfigApplyEvents API.
type ConfigApplyEvents struct {
Nodes []*ConfigApplyEventsNode `json:"nodes"`
}
// ConfigApplyEventsNode is a struct to hold the response from the ConfigApplyEvents API.
type ConfigApplyEventsNode struct {
Node *string `json:"node,omitempty"`
LastRequestID *string `json:"last_request_id,omitempty"`
Events []*ConfigApplyEventsNodeEvent `json:"events"`
}
// ConfigApplyEventsNodeEvent is a struct to hold the response from the ConfigApplyEvents API.
type ConfigApplyEventsNodeEvent struct {
Timestamp *Timestamp `json:"timestamp,omitempty"`
SeverityText *string `json:"severity_text,omitempty"`
Body *string `json:"body,omitempty"`
EventName *string `json:"event_name,omitempty"`
Topology *string `json:"topology,omitempty"`
Hostname *string `json:"hostname,omitempty"`
ConfigRunID *string `json:"config_run_id,omitempty"`
TraceID *string `json:"trace_id,omitempty"`
SpanID *string `json:"span_id,omitempty"`
SpanParentID *int64 `json:"span_parent_id,omitempty"`
SpanDepth *int `json:"span_depth,omitempty"`
}
// InitialConfigOptions is a struct to hold the options for the InitialConfig API.
type InitialConfigOptions struct {
License string `url:"license"`
Password string `url:"password"`
}
// LicenseStatus is a struct to hold the response from the License API.
type LicenseStatus struct {
AdvancedSecurityEnabled *bool `json:"advancedSecurityEnabled,omitempty"`
AdvancedSecuritySeats *int `json:"advancedSecuritySeats,omitempty"`
ClusterSupport *bool `json:"clusterSupport,omitempty"`
Company *string `json:"company,omitempty"`
CroquetSupport *bool `json:"croquetSupport,omitempty"`
CustomTerms *bool `json:"customTerms,omitempty"`
Evaluation *bool `json:"evaluation,omitempty"`
ExpireAt *Timestamp `json:"expireAt,omitempty"`
InsightsEnabled *bool `json:"insightsEnabled,omitempty"`
InsightsExpireAt *Timestamp `json:"insightsExpireAt,omitempty"`
LearningLabEvaluationExpires *Timestamp `json:"learningLabEvaluationExpires,omitempty"`
LearningLabSeats *int `json:"learningLabSeats,omitempty"`
Perpetual *bool `json:"perpetual,omitempty"`
ReferenceNumber *string `json:"referenceNumber,omitempty"`
Seats *int `json:"seats,omitempty"`
SSHAllowed *bool `json:"sshAllowed,omitempty"`
SupportKey *string `json:"supportKey,omitempty"`
UnlimitedSeating *bool `json:"unlimitedSeating,omitempty"`
}
// UploadLicenseOptions is a struct to hold the options for the UploadLicense API.
type UploadLicenseOptions struct {
License string `url:"license"`
}
// LicenseCheck is a struct to hold the response from the LicenseStatus API.
type LicenseCheck struct {
Status *string `json:"status,omitempty"`
}
// ConfigSettings is a struct to hold the response from the Settings API.
// There are many fields that link to other structs.
type ConfigSettings struct {
PrivateMode *bool `json:"private_mode,omitempty"`
PublicPages *bool `json:"public_pages,omitempty"`
SubdomainIsolation *bool `json:"subdomain_isolation,omitempty"`
SignupEnabled *bool `json:"signup_enabled,omitempty"`
GithubHostname *string `json:"github_hostname,omitempty"`
IdenticonsHost *string `json:"identicons_host,omitempty"`
HTTPProxy *string `json:"http_proxy,omitempty"`
AuthMode *string `json:"auth_mode,omitempty"`
ExpireSessions *bool `json:"expire_sessions,omitempty"`
AdminPassword *string `json:"admin_password,omitempty"`
ConfigurationID *int64 `json:"configuration_id,omitempty"`
ConfigurationRunCount *int `json:"configuration_run_count,omitempty"`
Avatar *ConfigSettingsAvatar `json:"avatar,omitempty"`
Customer *ConfigSettingsCustomer `json:"customer,omitempty"`
License *ConfigSettingsLicenseSettings `json:"license,omitempty"`
GithubSSL *ConfigSettingsGithubSSL `json:"github_ssl,omitempty"`
LDAP *ConfigSettingsLDAP `json:"ldap,omitempty"`
CAS *ConfigSettingsCAS `json:"cas,omitempty"`
SAML *ConfigSettingsSAML `json:"saml,omitempty"`
GithubOAuth *ConfigSettingsGithubOAuth `json:"github_oauth,omitempty"`
SMTP *ConfigSettingsSMTP `json:"smtp,omitempty"`
NTP *ConfigSettingsNTP `json:"ntp,omitempty"`
Timezone *string `json:"timezone,omitempty"`
SNMP *ConfigSettingsSNMP `json:"snmp,omitempty"`
Syslog *ConfigSettingsSyslog `json:"syslog,omitempty"`
Assets *string `json:"assets,omitempty"`
Pages *ConfigSettingsPagesSettings `json:"pages,omitempty"`
Collectd *ConfigSettingsCollectd `json:"collectd,omitempty"`
Mapping *ConfigSettingsMapping `json:"mapping,omitempty"`
LoadBalancer *string `json:"load_balancer,omitempty"`
}
// ConfigSettingsAvatar is a struct to hold the response from the Settings API.
type ConfigSettingsAvatar struct {
Enabled *bool `json:"enabled,omitempty"`
URI *string `json:"uri,omitempty"`
}
// ConfigSettingsCustomer is a struct to hold the response from the Settings API.
type ConfigSettingsCustomer struct {
Name *string `json:"name,omitempty"`
Email *string `json:"email,omitempty"`
UUID *string `json:"uuid,omitempty"`
Secret *string `json:"secret,omitempty"`
PublicKeyData *string `json:"public_key_data,omitempty"`
}
// ConfigSettingsLicenseSettings is a struct to hold the response from the Settings API.
type ConfigSettingsLicenseSettings struct {
Seats *int `json:"seats,omitempty"`
Evaluation *bool `json:"evaluation,omitempty"`
Perpetual *bool `json:"perpetual,omitempty"`
UnlimitedSeating *bool `json:"unlimited_seating,omitempty"`
SupportKey *string `json:"support_key,omitempty"`
SSHAllowed *bool `json:"ssh_allowed,omitempty"`
ClusterSupport *bool `json:"cluster_support,omitempty"`
ExpireAt *Timestamp `json:"expire_at,omitempty"`
}
// ConfigSettingsGithubSSL is a struct to hold the response from the Settings API.
type ConfigSettingsGithubSSL struct {
Enabled *bool `json:"enabled,omitempty"`
Cert *string `json:"cert,omitempty"`
Key *string `json:"key,omitempty"`
}
// ConfigSettingsLDAP is a struct to hold the response from the Settings API.
type ConfigSettingsLDAP struct {
Host *string `json:"host,omitempty"`
Port *int `json:"port,omitempty"`
Base []string `json:"base,omitempty"`
UID *string `json:"uid,omitempty"`
BindDN *string `json:"bind_dn,omitempty"`
Password *string `json:"password,omitempty"`
Method *string `json:"method,omitempty"`
SearchStrategy *string `json:"search_strategy,omitempty"`
UserGroups []string `json:"user_groups,omitempty"`
AdminGroup *string `json:"admin_group,omitempty"`
VirtualAttributeEnabled *bool `json:"virtual_attribute_enabled,omitempty"`
RecursiveGroupSearch *bool `json:"recursive_group_search,omitempty"`
PosixSupport *bool `json:"posix_support,omitempty"`
UserSyncEmails *bool `json:"user_sync_emails,omitempty"`
UserSyncKeys *bool `json:"user_sync_keys,omitempty"`
UserSyncInterval *int `json:"user_sync_interval,omitempty"`
TeamSyncInterval *int `json:"team_sync_interval,omitempty"`
SyncEnabled *bool `json:"sync_enabled,omitempty"`
Reconciliation *ConfigSettingsLDAPReconciliation `json:"reconciliation,omitempty"`
Profile *ConfigSettingsLDAPProfile `json:"profile,omitempty"`
}
// ConfigSettingsLDAPReconciliation is part of the ConfigSettingsLDAP struct.
type ConfigSettingsLDAPReconciliation struct {
User *string `json:"user,omitempty"`
Org *string `json:"org,omitempty"`
}
// ConfigSettingsLDAPProfile is part of the ConfigSettingsLDAP struct.
type ConfigSettingsLDAPProfile struct {
UID *string `json:"uid,omitempty"`
Name *string `json:"name,omitempty"`
Mail *string `json:"mail,omitempty"`
Key *string `json:"key,omitempty"`
}
// ConfigSettingsCAS is a struct to hold the response from the Settings API.
type ConfigSettingsCAS struct {
URL *string `json:"url,omitempty"`
}
// ConfigSettingsSAML is a struct to hold the response from the Settings API.
type ConfigSettingsSAML struct {
SSOURL *string `json:"sso_url,omitempty"`
Certificate *string `json:"certificate,omitempty"`
CertificatePath *string `json:"certificate_path,omitempty"`
Issuer *string `json:"issuer,omitempty"`
IDPInitiatedSSO *bool `json:"idp_initiated_sso,omitempty"`
DisableAdminDemote *bool `json:"disable_admin_demote,omitempty"`
}
// ConfigSettingsGithubOAuth is a struct to hold the response from the Settings API.
type ConfigSettingsGithubOAuth struct {
ClientID *string `json:"client_id,omitempty"`
ClientSecret *string `json:"client_secret,omitempty"`
OrganizationName *string `json:"organization_name,omitempty"`
OrganizationTeam *string `json:"organization_team,omitempty"`
}
// ConfigSettingsSMTP is a struct to hold the response from the Settings API.
type ConfigSettingsSMTP struct {
Enabled *bool `json:"enabled,omitempty"`
Address *string `json:"address,omitempty"`
Authentication *string `json:"authentication,omitempty"`
Port *string `json:"port,omitempty"`
Domain *string `json:"domain,omitempty"`
Username *string `json:"username,omitempty"`
UserName *string `json:"user_name,omitempty"`
EnableStarttlsAuto *bool `json:"enable_starttls_auto,omitempty"`
Password *string `json:"password,omitempty"`
DiscardToNoreplyAddress *bool `json:"discard-to-noreply-address,omitempty"`
SupportAddress *string `json:"support_address,omitempty"`
SupportAddressType *string `json:"support_address_type,omitempty"`
NoreplyAddress *string `json:"noreply_address,omitempty"`
}
// ConfigSettingsNTP is a struct to hold the response from the Settings API.
type ConfigSettingsNTP struct {
PrimaryServer *string `json:"primary_server,omitempty"`
SecondaryServer *string `json:"secondary_server,omitempty"`
}
// ConfigSettingsSNMP is a struct to hold the response from the Settings API.
type ConfigSettingsSNMP struct {
Enabled *bool `json:"enabled,omitempty"`
Community *string `json:"community,omitempty"`
}
// ConfigSettingsSyslog is a struct to hold the response from the Settings API.
type ConfigSettingsSyslog struct {
Enabled *bool `json:"enabled,omitempty"`
Server *string `json:"server,omitempty"`
ProtocolName *string `json:"protocol_name,omitempty"`
}
// ConfigSettingsPagesSettings is a struct to hold the response from the Settings API.
type ConfigSettingsPagesSettings struct {
Enabled *bool `json:"enabled,omitempty"`
}
// ConfigSettingsCollectd is a struct to hold the response from the Settings API.
type ConfigSettingsCollectd struct {
Enabled *bool `json:"enabled,omitempty"`
Server *string `json:"server,omitempty"`
Port *int `json:"port,omitempty"`
Encryption *string `json:"encryption,omitempty"`
Username *string `json:"username,omitempty"`
Password *string `json:"password,omitempty"`
}
// ConfigSettingsMapping is a struct to hold the response from the Settings API.
type ConfigSettingsMapping struct {
Enabled *bool `json:"enabled,omitempty"`
Tileserver *string `json:"tileserver,omitempty"`
Basemap *string `json:"basemap,omitempty"`
Token *string `json:"token,omitempty"`
}
// NodeMetadataStatus is a struct to hold the response from the NodeMetadata API.
type NodeMetadataStatus struct {
Topology *string `json:"topology,omitempty"`
Nodes []*NodeDetails `json:"nodes"`
}
// NodeDetails is a struct to hold the response from the NodeMetadata API.
type NodeDetails struct {
Hostname *string `json:"hostname,omitempty"`
UUID *string `json:"uuid,omitempty"`
ClusterRoles []string `json:"cluster_roles,omitempty"`
}
// ConfigApplyEvents gets events from the command ghe-config-apply.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#list-events-from-ghe-config-apply
//
//meta:operation GET /manage/v1/config/apply/events
func (s *EnterpriseService) ConfigApplyEvents(ctx context.Context, opts *ConfigApplyEventsOptions) (*ConfigApplyEvents, *Response, error) {
u, err := addOptions("manage/v1/config/apply/events", opts)
if err != nil {
return nil, nil, err
}
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
configApplyEvents := new(ConfigApplyEvents)
resp, err := s.client.Do(ctx, req, configApplyEvents)
if err != nil {
return nil, resp, err
}
return configApplyEvents, resp, nil
}
// InitialConfig initializes the GitHub Enterprise instance with a license and password.
// After initializing the instance, you need to trigger a config apply run to apply the configuration.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#initialize-instance-configuration-with-license-and-password
//
//meta:operation POST /manage/v1/config/init
func (s *EnterpriseService) InitialConfig(ctx context.Context, license, password string) (*Response, error) {
u := "manage/v1/config/init"
opts := &InitialConfigOptions{
License: license,
Password: password,
}
req, err := s.client.NewRequest("POST", u, opts)
if err != nil {
return nil, err
}
return s.client.Do(ctx, req, nil)
}
// License gets the current license information for the GitHub Enterprise instance.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#get-the-enterprise-license-information
//
//meta:operation GET /manage/v1/config/license
func (s *EnterpriseService) License(ctx context.Context) ([]*LicenseStatus, *Response, error) {
u := "manage/v1/config/license"
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
var licenseStatus []*LicenseStatus
resp, err := s.client.Do(ctx, req, &licenseStatus)
if err != nil {
return nil, resp, err
}
return licenseStatus, resp, nil
}
// UploadLicense uploads a new license to the GitHub Enterprise instance.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#upload-an-enterprise-license
//
//meta:operation PUT /manage/v1/config/license
func (s *EnterpriseService) UploadLicense(ctx context.Context, license string) (*Response, error) {
u := "manage/v1/config/license"
opts := &UploadLicenseOptions{
License: license,
}
req, err := s.client.NewRequest("PUT", u, opts)
if err != nil {
return nil, err
}
return s.client.Do(ctx, req, nil)
}
// LicenseStatus gets the current license status for the GitHub Enterprise instance.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#check-a-license
//
//meta:operation GET /manage/v1/config/license/check
func (s *EnterpriseService) LicenseStatus(ctx context.Context) ([]*LicenseCheck, *Response, error) {
u := "manage/v1/config/license/check"
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
var checks []*LicenseCheck
resp, err := s.client.Do(ctx, req, &checks)
if err != nil {
return nil, resp, err
}
return checks, resp, nil
}
// NodeMetadata gets the metadata for all nodes in the GitHub Enterprise instance.
// This is required for clustered setups.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#get-ghes-node-metadata-for-all-nodes
//
//meta:operation GET /manage/v1/config/nodes
func (s *EnterpriseService) NodeMetadata(ctx context.Context, opts *NodeQueryOptions) (*NodeMetadataStatus, *Response, error) {
u, err := addOptions("manage/v1/config/nodes", opts)
if err != nil {
return nil, nil, err
}
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
status := new(NodeMetadataStatus)
resp, err := s.client.Do(ctx, req, status)
if err != nil {
return nil, resp, err
}
return status, resp, nil
}
// Settings gets the current configuration settings for the GitHub Enterprise instance.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#get-the-ghes-settings
//
//meta:operation GET /manage/v1/config/settings
func (s *EnterpriseService) Settings(ctx context.Context) (*ConfigSettings, *Response, error) {
u := "manage/v1/config/settings"
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
configSettings := new(ConfigSettings)
resp, err := s.client.Do(ctx, req, configSettings)
if err != nil {
return nil, resp, err
}
return configSettings, resp, nil
}
// UpdateSettings updates the configuration settings for the GitHub Enterprise instance.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#set-settings
//
//meta:operation PUT /manage/v1/config/settings
func (s *EnterpriseService) UpdateSettings(ctx context.Context, opts *ConfigSettings) (*Response, error) {
u := "manage/v1/config/settings"
if opts == nil {
return nil, errors.New("opts should not be nil")
}
req, err := s.client.NewRequest("PUT", u, opts)
if err != nil {
return nil, err
}
return s.client.Do(ctx, req, nil)
}
// ConfigApply triggers a configuration apply run on the GitHub Enterprise instance.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#trigger-a-ghe-config-apply-run
//
//meta:operation POST /manage/v1/config/apply
func (s *EnterpriseService) ConfigApply(ctx context.Context, opts *ConfigApplyOptions) (*ConfigApplyOptions, *Response, error) {
u := "manage/v1/config/apply"
req, err := s.client.NewRequest("POST", u, opts)
if err != nil {
return nil, nil, err
}
configApplyOptions := new(ConfigApplyOptions)
resp, err := s.client.Do(ctx, req, configApplyOptions)
if err != nil {
return nil, resp, err
}
return configApplyOptions, resp, nil
}
// ConfigApplyStatus gets the status of a ghe-config-apply run on the GitHub Enterprise instance.
// You can request the status of the latest run or of a specific run by its ID.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#get-the-status-of-a-ghe-config-apply-run
//
//meta:operation GET /manage/v1/config/apply
func (s *EnterpriseService) ConfigApplyStatus(ctx context.Context, opts *ConfigApplyOptions) (*ConfigApplyStatus, *Response, error) {
u := "manage/v1/config/apply"
req, err := s.client.NewRequest("GET", u, opts)
if err != nil {
return nil, nil, err
}
status := new(ConfigApplyStatus)
resp, err := s.client.Do(ctx, req, status)
if err != nil {
return nil, resp, err
}
return status, resp, nil
}
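A minimal sketch of the trigger-then-poll flow these wrappers enable; the polling interval and error handling are illustrative, not prescribed by the API.
package main

import (
	"context"
	"errors"
	"time"

	"github.com/google/go-github/v69/github"
)

// applyAndWait triggers a ghe-config-apply run and polls until it finishes.
func applyAndWait(ctx context.Context, client *github.Client) error {
	// The response echoes back the run ID that was used.
	run, _, err := client.Enterprise.ConfigApply(ctx, &github.ConfigApplyOptions{})
	if err != nil {
		return err
	}
	for {
		status, _, err := client.Enterprise.ConfigApplyStatus(ctx, &github.ConfigApplyOptions{RunID: run.RunID})
		if err != nil {
			return err
		}
		if !status.GetRunning() {
			if !status.GetSuccessful() {
				return errors.New("ghe-config-apply run failed")
			}
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(10 * time.Second):
		}
	}
}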

View file

@ -0,0 +1,94 @@
// Copyright 2025 The go-github AUTHORS. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package github
import (
"context"
)
// MaintenanceOperationStatus represents the message to be displayed when the instance gets a maintenance operation request.
type MaintenanceOperationStatus struct {
Hostname *string `json:"hostname,omitempty"`
UUID *string `json:"uuid,omitempty"`
Message *string `json:"message,omitempty"`
}
// MaintenanceStatus represents the status of maintenance mode for all nodes.
type MaintenanceStatus struct {
Hostname *string `json:"hostname,omitempty"`
UUID *string `json:"uuid,omitempty"`
Status *string `json:"status,omitempty"`
ScheduledTime *Timestamp `json:"scheduled_time,omitempty"`
ConnectionServices []*ConnectionServiceItem `json:"connection_services,omitempty"`
CanUnsetMaintenance *bool `json:"can_unset_maintenance,omitempty"`
IPExceptionList []string `json:"ip_exception_list,omitempty"`
MaintenanceModeMessage *string `json:"maintenance_mode_message,omitempty"`
}
// ConnectionServiceItem represents the connection services for the maintenance status.
type ConnectionServiceItem struct {
Name *string `json:"name,omitempty"`
Number *int `json:"number,omitempty"`
}
// MaintenanceOptions represents the options for setting the maintenance mode for the instance.
// The When field can be an arbitrary date string (for example "now"), so we can't use a Timestamp type.
type MaintenanceOptions struct {
Enabled bool `json:"enabled"`
UUID *string `json:"uuid,omitempty"`
When *string `json:"when,omitempty"`
IPExceptionList []string `json:"ip_exception_list,omitempty"`
MaintenanceModeMessage *string `json:"maintenance_mode_message,omitempty"`
}
// GetMaintenanceStatus gets the status of maintenance mode for all nodes.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#get-the-status-of-maintenance-mode
//
//meta:operation GET /manage/v1/maintenance
func (s *EnterpriseService) GetMaintenanceStatus(ctx context.Context, opts *NodeQueryOptions) ([]*MaintenanceStatus, *Response, error) {
u, err := addOptions("manage/v1/maintenance", opts)
if err != nil {
return nil, nil, err
}
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
var status []*MaintenanceStatus
resp, err := s.client.Do(ctx, req, &status)
if err != nil {
return nil, resp, err
}
return status, resp, nil
}
// CreateMaintenance sets the maintenance mode for the instance.
// The enable parameter controls whether the instance is put into maintenance mode; pass false to disable maintenance mode.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#set-the-status-of-maintenance-mode
//
//meta:operation POST /manage/v1/maintenance
func (s *EnterpriseService) CreateMaintenance(ctx context.Context, enable bool, opts *MaintenanceOptions) ([]*MaintenanceOperationStatus, *Response, error) {
u := "manage/v1/maintenance"
opts.Enabled = enable
req, err := s.client.NewRequest("POST", u, opts)
if err != nil {
return nil, nil, err
}
var i []*MaintenanceOperationStatus
resp, err := s.client.Do(ctx, req, &i)
if err != nil {
return nil, resp, err
}
return i, resp, nil
}
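A minimal sketch of enabling maintenance mode through the new wrapper; the schedule string and IP exception list are placeholder values.
package main

import (
	"context"
	"fmt"

	"github.com/google/go-github/v69/github"
)

// enableMaintenance puts the instance into maintenance mode immediately.
func enableMaintenance(ctx context.Context, client *github.Client) error {
	opts := &github.MaintenanceOptions{
		// Placeholder schedule and exception list.
		When:            github.Ptr("now"),
		IPExceptionList: []string{"192.0.2.0/24"},
	}
	// CreateMaintenance copies the enable flag into opts.Enabled before sending.
	results, _, err := client.Enterprise.CreateMaintenance(ctx, true, opts)
	if err != nil {
		return err
	}
	for _, r := range results {
		fmt.Println(r.GetHostname(), r.GetMessage())
	}
	return nil
}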

View file

@ -0,0 +1,99 @@
// Copyright 2025 The go-github AUTHORS. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package github
import (
"context"
)
// SSHKeyStatus represents the status of an SSH key operation.
type SSHKeyStatus struct {
Hostname *string `json:"hostname,omitempty"`
UUID *string `json:"uuid,omitempty"`
Message *string `json:"message,omitempty"`
Modified *bool `json:"modified,omitempty"`
}
// SSHKeyOptions specifies the parameters to the SSH create and delete functions.
type SSHKeyOptions struct {
// Key is the SSH key to add to the instance.
Key string `json:"key"`
}
// ClusterSSHKey represents the SSH keys configured for the instance.
type ClusterSSHKey struct {
Key *string `json:"key,omitempty"`
Fingerprint *string `json:"fingerprint,omitempty"`
}
// DeleteSSHKey deletes the SSH key from the instance.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#delete-a-ssh-key
//
//meta:operation DELETE /manage/v1/access/ssh
func (s *EnterpriseService) DeleteSSHKey(ctx context.Context, key string) ([]*SSHKeyStatus, *Response, error) {
u := "manage/v1/access/ssh"
opts := &SSHKeyOptions{
Key: key,
}
req, err := s.client.NewRequest("DELETE", u, opts)
if err != nil {
return nil, nil, err
}
var sshStatus []*SSHKeyStatus
resp, err := s.client.Do(ctx, req, &sshStatus)
if err != nil {
return nil, resp, err
}
return sshStatus, resp, nil
}
// GetSSHKey gets the SSH keys configured for the instance.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#get-the-configured-ssh-keys
//
//meta:operation GET /manage/v1/access/ssh
func (s *EnterpriseService) GetSSHKey(ctx context.Context) ([]*ClusterSSHKey, *Response, error) {
u := "manage/v1/access/ssh"
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
var sshKeys []*ClusterSSHKey
resp, err := s.client.Do(ctx, req, &sshKeys)
if err != nil {
return nil, resp, err
}
return sshKeys, resp, nil
}
// CreateSSHKey adds a new SSH key to the instance.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#set-a-new-ssh-key
//
//meta:operation POST /manage/v1/access/ssh
func (s *EnterpriseService) CreateSSHKey(ctx context.Context, key string) ([]*SSHKeyStatus, *Response, error) {
u := "manage/v1/access/ssh"
opts := &SSHKeyOptions{
Key: key,
}
req, err := s.client.NewRequest("POST", u, opts)
if err != nil {
return nil, nil, err
}
var sshKeyResponse []*SSHKeyStatus
resp, err := s.client.Do(ctx, req, &sshKeyResponse)
if err != nil {
return nil, resp, err
}
return sshKeyResponse, resp, nil
}
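A minimal sketch of rotating the management SSH key with the wrappers above; the key material passed in is a placeholder.
package main

import (
	"context"
	"fmt"

	"github.com/google/go-github/v69/github"
)

// rotateSSHKey adds a new key, removes the old one, and lists what remains.
func rotateSSHKey(ctx context.Context, client *github.Client, newKey, oldKey string) error {
	if _, _, err := client.Enterprise.CreateSSHKey(ctx, newKey); err != nil {
		return err
	}
	if _, _, err := client.Enterprise.DeleteSSHKey(ctx, oldKey); err != nil {
		return err
	}
	keys, _, err := client.Enterprise.GetSSHKey(ctx)
	if err != nil {
		return err
	}
	for _, k := range keys {
		fmt.Println(k.GetFingerprint())
	}
	return nil
}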

View file

@ -0,0 +1,118 @@
// Copyright 2025 The go-github AUTHORS. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package github
import (
"context"
"fmt"
)
// CreateRepositoryRuleset creates a repository ruleset for the specified enterprise.
//
// GitHub API docs: https://docs.github.com/enterprise-cloud@latest/rest/enterprise-admin/rules#create-an-enterprise-repository-ruleset
//
//meta:operation POST /enterprises/{enterprise}/rulesets
func (s *EnterpriseService) CreateRepositoryRuleset(ctx context.Context, enterprise string, ruleset RepositoryRuleset) (*RepositoryRuleset, *Response, error) {
u := fmt.Sprintf("enterprises/%v/rulesets", enterprise)
req, err := s.client.NewRequest("POST", u, ruleset)
if err != nil {
return nil, nil, err
}
var rs *RepositoryRuleset
resp, err := s.client.Do(ctx, req, &rs)
if err != nil {
return nil, resp, err
}
return rs, resp, nil
}
// GetRepositoryRuleset gets a repository ruleset for the specified enterprise.
//
// GitHub API docs: https://docs.github.com/enterprise-cloud@latest/rest/enterprise-admin/rules#get-an-enterprise-repository-ruleset
//
//meta:operation GET /enterprises/{enterprise}/rulesets/{ruleset_id}
func (s *EnterpriseService) GetRepositoryRuleset(ctx context.Context, enterprise string, rulesetID int64) (*RepositoryRuleset, *Response, error) {
u := fmt.Sprintf("enterprises/%v/rulesets/%v", enterprise, rulesetID)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
var ruleset *RepositoryRuleset
resp, err := s.client.Do(ctx, req, &ruleset)
if err != nil {
return nil, resp, err
}
return ruleset, resp, nil
}
// UpdateRepositoryRuleset updates a repository ruleset for the specified enterprise.
//
// GitHub API docs: https://docs.github.com/enterprise-cloud@latest/rest/enterprise-admin/rules#update-an-enterprise-repository-ruleset
//
//meta:operation PUT /enterprises/{enterprise}/rulesets/{ruleset_id}
func (s *EnterpriseService) UpdateRepositoryRuleset(ctx context.Context, enterprise string, rulesetID int64, ruleset RepositoryRuleset) (*RepositoryRuleset, *Response, error) {
u := fmt.Sprintf("enterprises/%v/rulesets/%v", enterprise, rulesetID)
req, err := s.client.NewRequest("PUT", u, ruleset)
if err != nil {
return nil, nil, err
}
var rs *RepositoryRuleset
resp, err := s.client.Do(ctx, req, &rs)
if err != nil {
return nil, resp, err
}
return rs, resp, nil
}
// UpdateRepositoryRulesetClearBypassActor clears the bypass actors for a repository ruleset for the specified enterprise.
//
// This function is necessary as the UpdateRepositoryRuleset function does not marshal ByPassActor if passed as an empty array.
//
// GitHub API docs: https://docs.github.com/enterprise-cloud@latest/rest/enterprise-admin/rules#update-an-enterprise-repository-ruleset
//
//meta:operation PUT /enterprises/{enterprise}/rulesets/{ruleset_id}
func (s *EnterpriseService) UpdateRepositoryRulesetClearBypassActor(ctx context.Context, enterprise string, rulesetID int64) (*Response, error) {
u := fmt.Sprintf("enterprises/%v/rulesets/%v", enterprise, rulesetID)
rsClearBypassActor := rulesetClearBypassActors{}
req, err := s.client.NewRequest("PUT", u, rsClearBypassActor)
if err != nil {
return nil, err
}
resp, err := s.client.Do(ctx, req, nil)
if err != nil {
return resp, err
}
return resp, nil
}
// DeleteRepositoryRuleset deletes a repository ruleset from the specified enterprise.
//
// GitHub API docs: https://docs.github.com/enterprise-cloud@latest/rest/enterprise-admin/rules#delete-an-enterprise-repository-ruleset
//
//meta:operation DELETE /enterprises/{enterprise}/rulesets/{ruleset_id}
func (s *EnterpriseService) DeleteRepositoryRuleset(ctx context.Context, enterprise string, rulesetID int64) (*Response, error) {
u := fmt.Sprintf("enterprises/%v/rulesets/%v", enterprise, rulesetID)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
return s.client.Do(ctx, req, nil)
}
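A minimal sketch of reading and then deleting an enterprise repository ruleset by ID; the enterprise slug and ruleset ID are placeholders.
package main

import (
	"context"
	"fmt"

	"github.com/google/go-github/v69/github"
)

// inspectAndDeleteRuleset fetches a ruleset, prints it, then removes it.
func inspectAndDeleteRuleset(ctx context.Context, client *github.Client) error {
	rs, _, err := client.Enterprise.GetRepositoryRuleset(ctx, "my-enterprise", 42)
	if err != nil {
		return err
	}
	fmt.Printf("ruleset: %+v\n", rs)

	_, err = client.Enterprise.DeleteRepositoryRuleset(ctx, "my-enterprise", 42)
	return err
}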

View file

@ -1521,14 +1521,73 @@ type RepositoryImportEvent struct {
//
// GitHub API docs: https://docs.github.com/en/webhooks/webhook-events-and-payloads#repository_ruleset
type RepositoryRulesetEvent struct {
Action *string `json:"action,omitempty"`
Enterprise *Enterprise `json:"enterprise,omitempty"`
Installation *Installation `json:"installation,omitempty"`
Organization *Organization `json:"organization,omitempty"`
Repository *Repository `json:"repository,omitempty"`
RepositoryRuleset *RepositoryRuleset `json:"repository_ruleset"`
Changes *RepositoryRulesetEditedChanges `json:"changes,omitempty"`
Sender *User `json:"sender"`
Action *string `json:"action,omitempty"`
Enterprise *Enterprise `json:"enterprise,omitempty"`
Installation *Installation `json:"installation,omitempty"`
Organization *Organization `json:"organization,omitempty"`
Repository *Repository `json:"repository,omitempty"`
RepositoryRuleset *RepositoryRuleset `json:"repository_ruleset"`
Changes *RepositoryRulesetChanges `json:"changes,omitempty"`
Sender *User `json:"sender"`
}
// RepositoryRulesetChanges represents the changes made to a repository ruleset.
type RepositoryRulesetChanges struct {
Name *RepositoryRulesetChangeSource `json:"name,omitempty"`
Enforcement *RepositoryRulesetChangeSource `json:"enforcement,omitempty"`
Conditions *RepositoryRulesetChangedConditions `json:"conditions,omitempty"`
Rules *RepositoryRulesetChangedRules `json:"rules,omitempty"`
}
// RepositoryRulesetChangeSource represents a source change for the ruleset.
type RepositoryRulesetChangeSource struct {
From *string `json:"from,omitempty"`
}
// RepositoryRulesetChangeSources represents multiple source changes for the ruleset.
type RepositoryRulesetChangeSources struct {
From []string `json:"from,omitempty"`
}
// RepositoryRulesetChangedConditions holds changes to conditions in a ruleset.
type RepositoryRulesetChangedConditions struct {
Added []*RepositoryRulesetConditions `json:"added,omitempty"`
Deleted []*RepositoryRulesetConditions `json:"deleted,omitempty"`
Updated []*RepositoryRulesetUpdatedConditions `json:"updated,omitempty"`
}
// RepositoryRulesetUpdatedConditions represents the edited updates to conditions in a ruleset.
type RepositoryRulesetUpdatedConditions struct {
Condition *RepositoryRulesetConditions `json:"condition,omitempty"`
Changes *RepositoryRulesetUpdatedCondition `json:"changes,omitempty"`
}
// RepositoryRulesetUpdatedCondition represents the changes to a condition in a ruleset.
type RepositoryRulesetUpdatedCondition struct {
ConditionType *RepositoryRulesetChangeSource `json:"condition_type,omitempty"`
Target *RepositoryRulesetChangeSource `json:"target,omitempty"`
Include *RepositoryRulesetChangeSources `json:"include,omitempty"`
Exclude *RepositoryRulesetChangeSources `json:"exclude,omitempty"`
}
// RepositoryRulesetChangedRules holds changes to rules in a ruleset.
type RepositoryRulesetChangedRules struct {
Added []*RepositoryRule `json:"added,omitempty"`
Deleted []*RepositoryRule `json:"deleted,omitempty"`
Updated []*RepositoryRulesetUpdatedRules `json:"updated,omitempty"`
}
// RepositoryRulesetUpdatedRules holds updates to rules in a ruleset.
type RepositoryRulesetUpdatedRules struct {
Rule *RepositoryRule `json:"rule,omitempty"`
Changes *RepositoryRulesetChangedRule `json:"changes,omitempty"`
}
// RepositoryRulesetChangedRule holds changes made to a rule in a ruleset.
type RepositoryRulesetChangedRule struct {
Configuration *RepositoryRulesetChangeSource `json:"configuration,omitempty"`
RuleType *RepositoryRulesetChangeSource `json:"rule_type,omitempty"`
Pattern *RepositoryRulesetChangeSource `json:"pattern,omitempty"`
}
// RepositoryVulnerabilityAlertEvent is triggered when a security alert is created, dismissed, or resolved.
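A minimal sketch of a webhook handler that inspects the reworked RepositoryRulesetChanges payload; the secret, route, and log output are placeholders.
package main

import (
	"log"
	"net/http"

	"github.com/google/go-github/v69/github"
)

// rulesetHandler reports enforcement changes carried by repository_ruleset events.
func rulesetHandler(w http.ResponseWriter, r *http.Request) {
	payload, err := github.ValidatePayload(r, []byte("placeholder-secret"))
	if err != nil {
		http.Error(w, "invalid payload", http.StatusBadRequest)
		return
	}
	event, err := github.ParseWebHook(github.WebHookType(r), payload)
	if err != nil {
		http.Error(w, "unparsable event", http.StatusBadRequest)
		return
	}
	if e, ok := event.(*github.RepositoryRulesetEvent); ok {
		// The generated accessors are nil-safe, so missing fields fall through.
		if enforcement := e.GetChanges().GetEnforcement(); enforcement != nil {
			log.Printf("ruleset enforcement changed from %q", enforcement.GetFrom())
		}
	}
	w.WriteHeader(http.StatusOK)
}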

View file

@ -1,6 +1,6 @@
// Copyright 2013 The go-github AUTHORS. All rights reserved.
//
// Use of this source code is governed by BSD-style
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package github

View file

@ -19,6 +19,7 @@ import (
"net/http"
"net/url"
"reflect"
"regexp"
"strconv"
"strings"
"sync"
@ -28,7 +29,7 @@ import (
)
const (
Version = "v68.0.0"
Version = "v69.2.0"
defaultAPIVersion = "2022-11-28"
defaultBaseURL = "https://api.github.com/"
@ -38,7 +39,9 @@ const (
headerAPIVersion = "X-Github-Api-Version"
headerRateLimit = "X-Ratelimit-Limit"
headerRateRemaining = "X-Ratelimit-Remaining"
headerRateUsed = "X-Ratelimit-Used"
headerRateReset = "X-Ratelimit-Reset"
headerRateResource = "X-Ratelimit-Resource"
headerOTP = "X-Github-Otp"
headerRetryAfter = "Retry-After"
@ -155,8 +158,9 @@ var errNonNilContext = errors.New("context must be non-nil")
// A Client manages communication with the GitHub API.
type Client struct {
clientMu sync.Mutex // clientMu protects the client during calls that modify the CheckRedirect func.
client *http.Client // HTTP client used to communicate with the API.
clientMu sync.Mutex // clientMu protects the client during calls that modify the CheckRedirect func.
client *http.Client // HTTP client used to communicate with the API.
clientIgnoreRedirects *http.Client // HTTP client used to communicate with the API on endpoints where we don't want to follow redirects.
// Base URL for API requests. Defaults to the public GitHub API, but can be
// set to a domain endpoint to use with GitHub Enterprise. BaseURL should
@ -173,6 +177,13 @@ type Client struct {
rateLimits [Categories]Rate // Rate limits for the client as determined by the most recent API calls.
secondaryRateLimitReset time.Time // Secondary rate limit reset for the client as determined by the most recent API calls.
// If specified, Client will block requests for at most this duration in case of reaching a secondary
// rate limit
MaxSecondaryRateLimitRetryAfterDuration time.Duration
// Whether to respect rate limit headers on endpoints that return 302 redirections to artifacts
RateLimitRedirectionalEndpoints bool
common service // Reuse a single struct instead of allocating one for each service on the heap.
// Services used for talking to different parts of the GitHub API.
@ -394,6 +405,14 @@ func (c *Client) initialize() {
if c.client == nil {
c.client = &http.Client{}
}
// Copy the main http client into the IgnoreRedirects one, overriding the `CheckRedirect` func
c.clientIgnoreRedirects = &http.Client{}
c.clientIgnoreRedirects.Transport = c.client.Transport
c.clientIgnoreRedirects.Timeout = c.client.Timeout
c.clientIgnoreRedirects.Jar = c.client.Jar
c.clientIgnoreRedirects.CheckRedirect = func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}
if c.BaseURL == nil {
c.BaseURL, _ = url.Parse(defaultBaseURL)
}
@ -448,11 +467,12 @@ func (c *Client) copy() *Client {
c.clientMu.Lock()
// can't use *c here because that would copy mutexes by value.
clone := Client{
client: &http.Client{},
UserAgent: c.UserAgent,
BaseURL: c.BaseURL,
UploadURL: c.UploadURL,
secondaryRateLimitReset: c.secondaryRateLimitReset,
client: &http.Client{},
UserAgent: c.UserAgent,
BaseURL: c.BaseURL,
UploadURL: c.UploadURL,
RateLimitRedirectionalEndpoints: c.RateLimitRedirectionalEndpoints,
secondaryRateLimitReset: c.secondaryRateLimitReset,
}
c.clientMu.Unlock()
if c.client != nil {
@ -506,7 +526,7 @@ func WithVersion(version string) RequestOption {
// request body.
func (c *Client) NewRequest(method, urlStr string, body interface{}, opts ...RequestOption) (*http.Request, error) {
if !strings.HasSuffix(c.BaseURL.Path, "/") {
return nil, fmt.Errorf("BaseURL must have a trailing slash, but %q does not", c.BaseURL)
return nil, fmt.Errorf("baseURL must have a trailing slash, but %q does not", c.BaseURL)
}
u, err := c.BaseURL.Parse(urlStr)
@ -552,7 +572,7 @@ func (c *Client) NewRequest(method, urlStr string, body interface{}, opts ...Req
// Body is sent with Content-Type: application/x-www-form-urlencoded.
func (c *Client) NewFormRequest(urlStr string, body io.Reader, opts ...RequestOption) (*http.Request, error) {
if !strings.HasSuffix(c.BaseURL.Path, "/") {
return nil, fmt.Errorf("BaseURL must have a trailing slash, but %q does not", c.BaseURL)
return nil, fmt.Errorf("baseURL must have a trailing slash, but %q does not", c.BaseURL)
}
u, err := c.BaseURL.Parse(urlStr)
@ -584,7 +604,7 @@ func (c *Client) NewFormRequest(urlStr string, body io.Reader, opts ...RequestOp
// Relative URLs should always be specified without a preceding slash.
func (c *Client) NewUploadRequest(urlStr string, reader io.Reader, size int64, mediaType string, opts ...RequestOption) (*http.Request, error) {
if !strings.HasSuffix(c.UploadURL.Path, "/") {
return nil, fmt.Errorf("UploadURL must have a trailing slash, but %q does not", c.UploadURL)
return nil, fmt.Errorf("uploadURL must have a trailing slash, but %q does not", c.UploadURL)
}
u, err := c.UploadURL.Parse(urlStr)
if err != nil {
@ -750,11 +770,17 @@ func parseRate(r *http.Response) Rate {
if remaining := r.Header.Get(headerRateRemaining); remaining != "" {
rate.Remaining, _ = strconv.Atoi(remaining)
}
if used := r.Header.Get(headerRateUsed); used != "" {
rate.Used, _ = strconv.Atoi(used)
}
if reset := r.Header.Get(headerRateReset); reset != "" {
if v, _ := strconv.ParseInt(reset, 10, 64); v != 0 {
rate.Reset = Timestamp{time.Unix(v, 0)}
}
}
if resource := r.Header.Get(headerRateResource); resource != "" {
rate.Resource = resource
}
return rate
}
@ -801,19 +827,23 @@ func parseTokenExpiration(r *http.Response) Timestamp {
type requestContext uint8
const (
bypassRateLimitCheck requestContext = iota
// BypassRateLimitCheck prevents a pre-emptive check for exceeded primary rate limits
// Specify this by providing a context with this key, e.g.
// context.WithValue(context.Background(), github.BypassRateLimitCheck, true)
BypassRateLimitCheck requestContext = iota
SleepUntilPrimaryRateLimitResetWhenRateLimited
)
// BareDo sends an API request and lets you handle the api response. If an error
// or API Error occurs, the error will contain more information. Otherwise you
// are supposed to read and close the response's Body. If rate limit is exceeded
// and reset time is in the future, BareDo returns *RateLimitError immediately
// without making a network API call.
// bareDo sends an API request using `caller` http.Client passed in the parameters
// and lets you handle the api response. If an error or API Error occurs, the error
// will contain more information. Otherwise you are supposed to read and close the
// response's Body. If rate limit is exceeded and reset time is in the future,
// bareDo returns *RateLimitError immediately without making a network API call.
//
// The provided ctx must be non-nil, if it is nil an error is returned. If it is
// canceled or times out, ctx.Err() will be returned.
func (c *Client) BareDo(ctx context.Context, req *http.Request) (*Response, error) {
func (c *Client) bareDo(ctx context.Context, caller *http.Client, req *http.Request) (*Response, error) {
if ctx == nil {
return nil, errNonNilContext
}
@ -822,7 +852,7 @@ func (c *Client) BareDo(ctx context.Context, req *http.Request) (*Response, erro
rateLimitCategory := GetRateLimitCategory(req.Method, req.URL.Path)
if bypass := ctx.Value(bypassRateLimitCheck); bypass == nil {
if bypass := ctx.Value(BypassRateLimitCheck); bypass == nil {
// If we've hit rate limit, don't make further requests before Reset time.
if err := c.checkRateLimitBeforeDo(req, rateLimitCategory); err != nil {
return &Response{
@ -838,7 +868,7 @@ func (c *Client) BareDo(ctx context.Context, req *http.Request) (*Response, erro
}
}
resp, err := c.client.Do(req)
resp, err := caller.Do(req)
var response *Response
if resp != nil {
response = newResponse(resp)
@ -897,12 +927,16 @@ func (c *Client) BareDo(ctx context.Context, req *http.Request) (*Response, erro
return response, err
}
// retry the request once when the rate limit has reset
return c.BareDo(context.WithValue(req.Context(), SleepUntilPrimaryRateLimitResetWhenRateLimited, nil), req)
return c.bareDo(context.WithValue(req.Context(), SleepUntilPrimaryRateLimitResetWhenRateLimited, nil), caller, req)
}
// Update the secondary rate limit if we hit it.
rerr, ok := err.(*AbuseRateLimitError)
if ok && rerr.RetryAfter != nil {
// if a max duration is specified, make sure that we are waiting at most this duration
if c.MaxSecondaryRateLimitRetryAfterDuration > 0 && rerr.GetRetryAfter() > c.MaxSecondaryRateLimitRetryAfterDuration {
rerr.RetryAfter = &c.MaxSecondaryRateLimitRetryAfterDuration
}
c.rateMu.Lock()
c.secondaryRateLimitReset = time.Now().Add(*rerr.RetryAfter)
c.rateMu.Unlock()
@ -911,6 +945,72 @@ func (c *Client) BareDo(ctx context.Context, req *http.Request) (*Response, erro
return response, err
}
// BareDo sends an API request and lets you handle the api response. If an error
// or API Error occurs, the error will contain more information. Otherwise you
// are supposed to read and close the response's Body. If rate limit is exceeded
// and reset time is in the future, BareDo returns *RateLimitError immediately
// without making a network API call.
//
// The provided ctx must be non-nil, if it is nil an error is returned. If it is
// canceled or times out, ctx.Err() will be returned.
func (c *Client) BareDo(ctx context.Context, req *http.Request) (*Response, error) {
return c.bareDo(ctx, c.client, req)
}
// bareDoIgnoreRedirects has the exact same behavior as BareDo but stops at the first
// redirection code returned by the API. If a redirection is returned by the api, bareDoIgnoreRedirects
// returns a *RedirectionError.
//
// The provided ctx must be non-nil, if it is nil an error is returned. If it is
// canceled or times out, ctx.Err() will be returned.
func (c *Client) bareDoIgnoreRedirects(ctx context.Context, req *http.Request) (*Response, error) {
return c.bareDo(ctx, c.clientIgnoreRedirects, req)
}
var errInvalidLocation = errors.New("invalid or empty Location header in redirection response")
// bareDoUntilFound has the exact same behavior as BareDo but only follows 301s, up to maxRedirects times. If it receives
// a 302, it will parse the Location header into a *url.URL and return that.
// This is useful for endpoints that return a 302 in successful cases but still might return 301s for
// permanent redirections.
//
// The provided ctx must be non-nil, if it is nil an error is returned. If it is
// canceled or times out, ctx.Err() will be returned.
func (c *Client) bareDoUntilFound(ctx context.Context, req *http.Request, maxRedirects int) (*url.URL, *Response, error) {
response, err := c.bareDoIgnoreRedirects(ctx, req)
if err != nil {
rerr, ok := err.(*RedirectionError)
if ok {
// If we receive a 302, transform potential relative locations into absolute and return it.
if rerr.StatusCode == http.StatusFound {
if rerr.Location == nil {
return nil, nil, errInvalidLocation
}
newURL := c.BaseURL.ResolveReference(rerr.Location)
return newURL, response, nil
}
// If permanent redirect response is returned, follow it
if maxRedirects > 0 && rerr.StatusCode == http.StatusMovedPermanently {
if rerr.Location == nil {
return nil, nil, errInvalidLocation
}
newURL := c.BaseURL.ResolveReference(rerr.Location)
newRequest := req.Clone(ctx)
newRequest.URL = newURL
return c.bareDoUntilFound(ctx, newRequest, maxRedirects-1)
}
// If we reached the maximum amount of redirections, return an error
if maxRedirects <= 0 && rerr.StatusCode == http.StatusMovedPermanently {
return nil, response, fmt.Errorf("reached the maximum amount of redirections: %w", err)
}
return nil, response, fmt.Errorf("unexpected redirection response: %w", err)
}
}
// If we don't receive a redirection, forward the response and potential error
return nil, response, err
}
// Do sends an API request and returns the API response. The API response is
// JSON decoded and stored in the value pointed to by v, or returned as an
// error if an API error has occurred. If v implements the io.Writer interface,
@ -1034,7 +1134,8 @@ GitHub API docs: https://docs.github.com/rest/#client-errors
type ErrorResponse struct {
Response *http.Response `json:"-"` // HTTP response that caused this error
Message string `json:"message"` // error message
Errors []Error `json:"errors"` // more detail on individual errors
//nolint:sliceofpointers
Errors []Error `json:"errors"` // more detail on individual errors
// Block is only populated on certain types of errors such as code 451.
Block *ErrorBlock `json:"block,omitempty"`
// Most errors will also include a documentation_url field pointing
@ -1196,6 +1297,40 @@ func (r *AbuseRateLimitError) Is(target error) bool {
compareHTTPResponse(r.Response, v.Response)
}
// RedirectionError represents a response that returned a redirect status code:
//
// 301 (Moved Permanently)
// 302 (Found)
// 303 (See Other)
// 307 (Temporary Redirect)
// 308 (Permanent Redirect)
//
// If there was a valid Location header included, it will be parsed to a URL. You should use
// `BaseURL.ResolveReference()` to enrich it with the correct hostname where needed.
type RedirectionError struct {
Response *http.Response // HTTP response that caused this error
StatusCode int
Location *url.URL // location header of the redirection if present
}
func (r *RedirectionError) Error() string {
return fmt.Sprintf("%v %v: %d location %v",
r.Response.Request.Method, sanitizeURL(r.Response.Request.URL),
r.StatusCode, sanitizeURL(r.Location))
}
// Is returns whether the provided error equals this error.
func (r *RedirectionError) Is(target error) bool {
v, ok := target.(*RedirectionError)
if !ok {
return false
}
return r.StatusCode == v.StatusCode &&
(r.Location == v.Location || // either both locations are nil or exactly the same pointer
r.Location != nil && v.Location != nil && r.Location.String() == v.Location.String()) // or they are both not nil and marshaled identically
}
// sanitizeURL redacts the client_secret parameter from the URL which may be
// exposed to the user.
func sanitizeURL(uri *url.URL) *url.URL {
@ -1260,7 +1395,8 @@ func (e *Error) UnmarshalJSON(data []byte) error {
//
// The error type will be *RateLimitError for rate limit exceeded errors,
// *AcceptedError for 202 Accepted status codes,
// and *TwoFactorAuthError for two-factor authentication errors.
// *TwoFactorAuthError for two-factor authentication errors,
// and *RedirectionError for redirect status codes (only happens when ignoring redirections).
func CheckResponse(r *http.Response) error {
if r.StatusCode == http.StatusAccepted {
return &AcceptedError{}
@ -1302,6 +1438,25 @@ func CheckResponse(r *http.Response) error {
abuseRateLimitError.RetryAfter = retryAfter
}
return abuseRateLimitError
// Check that the status code is a redirection and return a sentinel error that can be used to handle special cases
// where 302 is considered a successful result.
// This should never happen with the default `CheckRedirect`, because it would return a `url.Error` that should be handled upstream.
case r.StatusCode == http.StatusMovedPermanently ||
r.StatusCode == http.StatusFound ||
r.StatusCode == http.StatusSeeOther ||
r.StatusCode == http.StatusTemporaryRedirect ||
r.StatusCode == http.StatusPermanentRedirect:
locationStr := r.Header.Get("Location")
var location *url.URL
if locationStr != "" {
location, _ = url.Parse(locationStr)
}
return &RedirectionError{
Response: errorResponse.Response,
StatusCode: r.StatusCode,
Location: location,
}
default:
return errorResponse
}
@ -1616,3 +1771,18 @@ type roundTripperFunc func(*http.Request) (*http.Response, error)
func (fn roundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
return fn(r)
}
var runIDFromURLRE = regexp.MustCompile(`^repos/.*/actions/runs/(\d+)/deployment_protection_rule$`)
// GetRunID is a helper function used to extract the workflow RunID from the DeploymentProtectionRuleEvent.DeploymentCallbackURL.
func (e *DeploymentProtectionRuleEvent) GetRunID() (int64, error) {
match := runIDFromURLRE.FindStringSubmatch(*e.DeploymentCallbackURL)
if len(match) != 2 {
return -1, errors.New("no match")
}
runID, err := strconv.ParseInt(match[1], 10, 64)
if err != nil {
return -1, err
}
return runID, nil
}
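A minimal sketch of opting into the two new client fields introduced above; the duration and the use of an auth token are illustrative.
package main

import (
	"time"

	"github.com/google/go-github/v69/github"
)

// newTunedClient builds a client that caps secondary-rate-limit sleeps and
// respects rate limit headers on endpoints that redirect to artifacts.
func newTunedClient(token string) *github.Client {
	c := github.NewClient(nil).WithAuthToken(token)
	c.MaxSecondaryRateLimitRetryAfterDuration = 30 * time.Second
	c.RateLimitRedirectionalEndpoints = true
	return c
}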

View file

@ -55,6 +55,7 @@ type Issue struct {
Assignees []*User `json:"assignees,omitempty"`
NodeID *string `json:"node_id,omitempty"`
Draft *bool `json:"draft,omitempty"`
Type *IssueType `json:"type,omitempty"`
// TextMatches is only populated from search results that request text matches
// See: search.go and https://docs.github.com/rest/search/#text-match-metadata
@ -129,6 +130,18 @@ type PullRequestLinks struct {
MergedAt *Timestamp `json:"merged_at,omitempty"`
}
// IssueType represents the type of issue.
// For now it shows up when receiving an Issue event.
type IssueType struct {
ID *int64 `json:"id,omitempty"`
NodeID *string `json:"node_id,omitempty"`
Name *string `json:"name,omitempty"`
Description *string `json:"description,omitempty"`
Color *string `json:"color,omitempty"`
CreatedAt *Timestamp `json:"created_at,omitempty"`
UpdatedAt *Timestamp `json:"updated_at,omitempty"`
}
// List the issues for the authenticated user. If all is true, list issues
// across all the user's visible repositories including owned, member, and
// organization repositories; if false, list only owned and member
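A minimal sketch of reading the new Type field from issues returned by the API; the owner and repo names are placeholders.
package main

import (
	"context"
	"fmt"

	"github.com/google/go-github/v69/github"
)

// printIssueTypes lists issues and the issue type attached to each, if any.
func printIssueTypes(ctx context.Context, client *github.Client) error {
	issues, _, err := client.Issues.ListByRepo(ctx, "octocat", "hello-world", nil)
	if err != nil {
		return err
	}
	for _, issue := range issues {
		// The generated accessors are nil-safe, so a missing type prints as "".
		fmt.Printf("#%d %s (type: %s)\n", issue.GetNumber(), issue.GetTitle(), issue.GetType().GetName())
	}
	return nil
}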

Some files were not shown because too many files have changed in this diff.