From bbbe67bf7ce3bb0669efe85d23985934ef79e9a5 Mon Sep 17 00:00:00 2001
From: Gabriel Adrian Samfira
Date: Thu, 30 Jun 2022 10:17:39 +0000
Subject: [PATCH] Vendor packages and add Makefile
* Vendors packages
* Adds a Makefile that uses Docker to build a static binary against musl
  using Alpine Linux (see the illustrative sketch below).
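
The idea, roughly: compile inside an Alpine container so that cgo links
against musl, then copy the fully static binary out of the container. The
sketch below is illustrative only; the base image, package list, build path
(./cmd/garm) and linker flags are assumptions, not the exact contents of the
Dockerfile, Makefile and scripts/build-static.sh added by this patch.

    # Dockerfile (illustrative sketch, not the one added by this patch)
    FROM golang:alpine
    # gcc and musl-dev are needed because go-sqlite3 builds C code via cgo
    RUN apk add --no-cache gcc musl-dev git
    WORKDIR /build
    COPY . /build
    # -mod=vendor consumes the packages vendored by this change; the
    # external linkmode plus -static yields a musl-linked static binary
    RUN CGO_ENABLED=1 go build -mod=vendor \
            -ldflags '-linkmode external -extldflags "-static"' \
            -o /build/garm ./cmd/garm

    # What a Makefile target might wrap (placeholder names):
    #   docker build -t garm-build .
    #   docker run --rm -v "$PWD/bin:/out" garm-build cp /build/garm /out/

Target names, paths and image tags above are placeholders; the actual
Makefile and Dockerfile in this patch define the real build steps.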
Signed-off-by: Gabriel Adrian Samfira
---
Dockerfile | 11 +
Makefile | 11 +
cmd/garm-cli/cmd/root.go | 2 +
cmd/garm-cli/cmd/version.go | 35 +
runner/pool/common.go | 11 +-
scripts/build-static.sh | 20 +
vendor/github.com/BurntSushi/toml/.gitignore | 2 +
vendor/github.com/BurntSushi/toml/COMPATIBLE | 1 +
vendor/github.com/BurntSushi/toml/COPYING | 21 +
vendor/github.com/BurntSushi/toml/README.md | 220 +
vendor/github.com/BurntSushi/toml/decode.go | 511 +
.../BurntSushi/toml/decode_go116.go | 18 +
.../github.com/BurntSushi/toml/decode_meta.go | 123 +
.../github.com/BurntSushi/toml/deprecated.go | 33 +
vendor/github.com/BurntSushi/toml/doc.go | 13 +
vendor/github.com/BurntSushi/toml/encode.go | 650 +
.../github.com/BurntSushi/toml/internal/tz.go | 36 +
vendor/github.com/BurntSushi/toml/lex.go | 1225 +
vendor/github.com/BurntSushi/toml/parse.go | 739 +
.../github.com/BurntSushi/toml/type_check.go | 70 +
.../github.com/BurntSushi/toml/type_fields.go | 242 +
vendor/github.com/chzyer/readline/.gitignore | 1 +
vendor/github.com/chzyer/readline/.travis.yml | 8 +
.../github.com/chzyer/readline/CHANGELOG.md | 58 +
vendor/github.com/chzyer/readline/LICENSE | 22 +
vendor/github.com/chzyer/readline/README.md | 114 +
.../chzyer/readline/ansi_windows.go | 249 +
vendor/github.com/chzyer/readline/complete.go | 285 +
.../chzyer/readline/complete_helper.go | 165 +
.../chzyer/readline/complete_segment.go | 82 +
vendor/github.com/chzyer/readline/history.go | 330 +
.../github.com/chzyer/readline/operation.go | 531 +
vendor/github.com/chzyer/readline/password.go | 33 +
.../chzyer/readline/rawreader_windows.go | 125 +
vendor/github.com/chzyer/readline/readline.go | 326 +
vendor/github.com/chzyer/readline/remote.go | 475 +
vendor/github.com/chzyer/readline/runebuf.go | 629 +
vendor/github.com/chzyer/readline/runes.go | 223 +
vendor/github.com/chzyer/readline/search.go | 164 +
vendor/github.com/chzyer/readline/std.go | 197 +
.../github.com/chzyer/readline/std_windows.go | 9 +
vendor/github.com/chzyer/readline/term.go | 123 +
vendor/github.com/chzyer/readline/term_bsd.go | 29 +
.../github.com/chzyer/readline/term_linux.go | 33 +
.../chzyer/readline/term_solaris.go | 32 +
.../github.com/chzyer/readline/term_unix.go | 24 +
.../chzyer/readline/term_windows.go | 171 +
vendor/github.com/chzyer/readline/terminal.go | 238 +
vendor/github.com/chzyer/readline/utils.go | 277 +
.../github.com/chzyer/readline/utils_unix.go | 83 +
.../chzyer/readline/utils_windows.go | 41 +
vendor/github.com/chzyer/readline/vim.go | 176 +
.../github.com/chzyer/readline/windows_api.go | 152 +
vendor/github.com/davecgh/go-spew/LICENSE | 15 +
.../github.com/davecgh/go-spew/spew/bypass.go | 145 +
.../davecgh/go-spew/spew/bypasssafe.go | 38 +
.../github.com/davecgh/go-spew/spew/common.go | 341 +
.../github.com/davecgh/go-spew/spew/config.go | 306 +
vendor/github.com/davecgh/go-spew/spew/doc.go | 211 +
.../github.com/davecgh/go-spew/spew/dump.go | 509 +
.../github.com/davecgh/go-spew/spew/format.go | 419 +
.../github.com/davecgh/go-spew/spew/spew.go | 148 +
.../github.com/felixge/httpsnoop/.gitignore | 0
.../github.com/felixge/httpsnoop/.travis.yml | 6 +
.../github.com/felixge/httpsnoop/LICENSE.txt | 19 +
vendor/github.com/felixge/httpsnoop/Makefile | 10 +
vendor/github.com/felixge/httpsnoop/README.md | 94 +
.../felixge/httpsnoop/capture_metrics.go | 84 +
vendor/github.com/felixge/httpsnoop/docs.go | 10 +
.../httpsnoop/wrap_generated_gteq_1.8.go | 385 +
.../httpsnoop/wrap_generated_lt_1.8.go | 243 +
.../github.com/flosch/pongo2/.gitattributes | 1 +
vendor/github.com/flosch/pongo2/.gitignore | 41 +
vendor/github.com/flosch/pongo2/.travis.yml | 8 +
vendor/github.com/flosch/pongo2/AUTHORS | 11 +
vendor/github.com/flosch/pongo2/LICENSE | 20 +
vendor/github.com/flosch/pongo2/README.md | 167 +
vendor/github.com/flosch/pongo2/context.go | 137 +
vendor/github.com/flosch/pongo2/doc.go | 31 +
vendor/github.com/flosch/pongo2/error.go | 91 +
vendor/github.com/flosch/pongo2/filters.go | 141 +
.../flosch/pongo2/filters_builtin.go | 927 +
vendor/github.com/flosch/pongo2/helpers.go | 15 +
vendor/github.com/flosch/pongo2/lexer.go | 432 +
vendor/github.com/flosch/pongo2/nodes.go | 16 +
vendor/github.com/flosch/pongo2/nodes_html.go | 23 +
.../github.com/flosch/pongo2/nodes_wrapper.go | 16 +
vendor/github.com/flosch/pongo2/options.go | 26 +
vendor/github.com/flosch/pongo2/parser.go | 309 +
.../flosch/pongo2/parser_document.go | 59 +
.../flosch/pongo2/parser_expression.go | 517 +
vendor/github.com/flosch/pongo2/pongo2.go | 14 +
vendor/github.com/flosch/pongo2/tags.go | 133 +
.../flosch/pongo2/tags_autoescape.go | 52 +
vendor/github.com/flosch/pongo2/tags_block.go | 129 +
.../github.com/flosch/pongo2/tags_comment.go | 27 +
vendor/github.com/flosch/pongo2/tags_cycle.go | 106 +
.../github.com/flosch/pongo2/tags_extends.go | 52 +
.../github.com/flosch/pongo2/tags_filter.go | 95 +
.../github.com/flosch/pongo2/tags_firstof.go | 49 +
vendor/github.com/flosch/pongo2/tags_for.go | 159 +
vendor/github.com/flosch/pongo2/tags_if.go | 76 +
.../flosch/pongo2/tags_ifchanged.go | 116 +
.../github.com/flosch/pongo2/tags_ifequal.go | 78 +
.../flosch/pongo2/tags_ifnotequal.go | 78 +
.../github.com/flosch/pongo2/tags_import.go | 84 +
.../github.com/flosch/pongo2/tags_include.go | 146 +
vendor/github.com/flosch/pongo2/tags_lorem.go | 132 +
vendor/github.com/flosch/pongo2/tags_macro.go | 149 +
vendor/github.com/flosch/pongo2/tags_now.go | 50 +
vendor/github.com/flosch/pongo2/tags_set.go | 50 +
.../flosch/pongo2/tags_spaceless.go | 54 +
vendor/github.com/flosch/pongo2/tags_ssi.go | 68 +
.../flosch/pongo2/tags_templatetag.go | 45 +
.../flosch/pongo2/tags_widthratio.go | 83 +
vendor/github.com/flosch/pongo2/tags_with.go | 88 +
vendor/github.com/flosch/pongo2/template.go | 276 +
.../flosch/pongo2/template_loader.go | 156 +
.../github.com/flosch/pongo2/template_sets.go | 305 +
vendor/github.com/flosch/pongo2/value.go | 540 +
vendor/github.com/flosch/pongo2/variable.go | 693 +
.../go-macaroon-bakery/macaroonpb/LICENSE | 187 +
.../go-macaroon-bakery/macaroonpb/README.md | 13 +
.../go-macaroon-bakery/macaroonpb/id.go | 19 +
.../go-macaroon-bakery/macaroonpb/id.pb.go | 238 +
.../go-macaroon-bakery/macaroonpb/id.proto | 14 +
.../github.com/go-resty/resty/v2/.gitignore | 30 +
.../github.com/go-resty/resty/v2/BUILD.bazel | 48 +
vendor/github.com/go-resty/resty/v2/LICENSE | 21 +
vendor/github.com/go-resty/resty/v2/README.md | 906 +
vendor/github.com/go-resty/resty/v2/WORKSPACE | 31 +
vendor/github.com/go-resty/resty/v2/client.go | 1115 +
.../go-resty/resty/v2/middleware.go | 543 +
.../github.com/go-resty/resty/v2/redirect.go | 101 +
.../github.com/go-resty/resty/v2/request.go | 896 +
.../github.com/go-resty/resty/v2/response.go | 175 +
vendor/github.com/go-resty/resty/v2/resty.go | 40 +
vendor/github.com/go-resty/resty/v2/retry.go | 221 +
vendor/github.com/go-resty/resty/v2/trace.go | 130 +
.../github.com/go-resty/resty/v2/transport.go | 35 +
.../go-resty/resty/v2/transport112.go | 34 +
vendor/github.com/go-resty/resty/v2/util.go | 391 +
.../github.com/go-sql-driver/mysql/.gitignore | 9 +
vendor/github.com/go-sql-driver/mysql/AUTHORS | 117 +
.../go-sql-driver/mysql/CHANGELOG.md | 232 +
vendor/github.com/go-sql-driver/mysql/LICENSE | 373 +
.../github.com/go-sql-driver/mysql/README.md | 520 +
vendor/github.com/go-sql-driver/mysql/auth.go | 425 +
.../github.com/go-sql-driver/mysql/buffer.go | 182 +
.../go-sql-driver/mysql/collations.go | 265 +
.../go-sql-driver/mysql/conncheck.go | 54 +
.../go-sql-driver/mysql/conncheck_dummy.go | 17 +
.../go-sql-driver/mysql/connection.go | 650 +
.../go-sql-driver/mysql/connector.go | 146 +
.../github.com/go-sql-driver/mysql/const.go | 174 +
.../github.com/go-sql-driver/mysql/driver.go | 107 +
vendor/github.com/go-sql-driver/mysql/dsn.go | 560 +
.../github.com/go-sql-driver/mysql/errors.go | 65 +
.../github.com/go-sql-driver/mysql/fields.go | 194 +
vendor/github.com/go-sql-driver/mysql/fuzz.go | 24 +
.../github.com/go-sql-driver/mysql/infile.go | 182 +
.../go-sql-driver/mysql/nulltime.go | 50 +
.../go-sql-driver/mysql/nulltime_go113.go | 40 +
.../go-sql-driver/mysql/nulltime_legacy.go | 39 +
.../github.com/go-sql-driver/mysql/packets.go | 1349 +
.../github.com/go-sql-driver/mysql/result.go | 22 +
vendor/github.com/go-sql-driver/mysql/rows.go | 223 +
.../go-sql-driver/mysql/statement.go | 220 +
.../go-sql-driver/mysql/transaction.go | 31 +
.../github.com/go-sql-driver/mysql/utils.go | 868 +
vendor/github.com/golang-jwt/jwt/.gitignore | 4 +
vendor/github.com/golang-jwt/jwt/LICENSE | 9 +
.../golang-jwt/jwt/MIGRATION_GUIDE.md | 22 +
vendor/github.com/golang-jwt/jwt/README.md | 113 +
.../golang-jwt/jwt/VERSION_HISTORY.md | 131 +
vendor/github.com/golang-jwt/jwt/claims.go | 146 +
vendor/github.com/golang-jwt/jwt/doc.go | 4 +
vendor/github.com/golang-jwt/jwt/ecdsa.go | 142 +
.../github.com/golang-jwt/jwt/ecdsa_utils.go | 69 +
vendor/github.com/golang-jwt/jwt/ed25519.go | 81 +
.../golang-jwt/jwt/ed25519_utils.go | 64 +
vendor/github.com/golang-jwt/jwt/errors.go | 59 +
vendor/github.com/golang-jwt/jwt/hmac.go | 95 +
.../github.com/golang-jwt/jwt/map_claims.go | 120 +
vendor/github.com/golang-jwt/jwt/none.go | 52 +
vendor/github.com/golang-jwt/jwt/parser.go | 148 +
vendor/github.com/golang-jwt/jwt/rsa.go | 101 +
vendor/github.com/golang-jwt/jwt/rsa_pss.go | 142 +
vendor/github.com/golang-jwt/jwt/rsa_utils.go | 101 +
.../golang-jwt/jwt/signing_method.go | 35 +
vendor/github.com/golang-jwt/jwt/token.go | 104 +
vendor/github.com/golang/protobuf/AUTHORS | 3 +
.../github.com/golang/protobuf/CONTRIBUTORS | 3 +
vendor/github.com/golang/protobuf/LICENSE | 28 +
.../golang/protobuf/proto/buffer.go | 324 +
.../golang/protobuf/proto/defaults.go | 63 +
.../golang/protobuf/proto/deprecated.go | 113 +
.../golang/protobuf/proto/discard.go | 58 +
.../golang/protobuf/proto/extensions.go | 356 +
.../golang/protobuf/proto/properties.go | 306 +
.../github.com/golang/protobuf/proto/proto.go | 167 +
.../golang/protobuf/proto/registry.go | 317 +
.../golang/protobuf/proto/text_decode.go | 801 +
.../golang/protobuf/proto/text_encode.go | 560 +
.../github.com/golang/protobuf/proto/wire.go | 78 +
.../golang/protobuf/proto/wrappers.go | 34 +
.../github.com/google/go-github/v43/AUTHORS | 357 +
.../github.com/google/go-github/v43/LICENSE | 27 +
.../google/go-github/v43/github/actions.go | 12 +
.../go-github/v43/github/actions_artifacts.go | 164 +
.../v43/github/actions_runner_groups.go | 294 +
.../go-github/v43/github/actions_runners.go | 377 +
.../go-github/v43/github/actions_secrets.go | 358 +
.../v43/github/actions_workflow_jobs.go | 154 +
.../v43/github/actions_workflow_runs.go | 292 +
.../go-github/v43/github/actions_workflows.go | 218 +
.../google/go-github/v43/github/activity.go | 72 +
.../go-github/v43/github/activity_events.go | 217 +
.../v43/github/activity_notifications.go | 223 +
.../go-github/v43/github/activity_star.go | 138 +
.../go-github/v43/github/activity_watching.go | 147 +
.../google/go-github/v43/github/admin.go | 119 +
.../google/go-github/v43/github/admin_orgs.go | 89 +
.../go-github/v43/github/admin_stats.go | 171 +
.../go-github/v43/github/admin_users.go | 133 +
.../google/go-github/v43/github/apps.go | 356 +
.../google/go-github/v43/github/apps_hooks.go | 48 +
.../v43/github/apps_hooks_deliveries.go | 72 +
.../go-github/v43/github/apps_installation.go | 128 +
.../go-github/v43/github/apps_manifest.go | 49 +
.../go-github/v43/github/apps_marketplace.go | 180 +
.../go-github/v43/github/authorizations.go | 281 +
.../google/go-github/v43/github/billing.go | 199 +
.../google/go-github/v43/github/checks.go | 435 +
.../go-github/v43/github/code-scanning.go | 302 +
.../google/go-github/v43/github/dependabot.go | 12 +
.../v43/github/dependabot_secrets.go | 228 +
.../google/go-github/v43/github/doc.go | 213 +
.../google/go-github/v43/github/enterprise.go | 12 +
.../v43/github/enterprise_actions_runners.go | 69 +
.../v43/github/enterprise_audit_log.go | 35 +
.../google/go-github/v43/github/event.go | 154 +
.../go-github/v43/github/event_types.go | 1250 +
.../google/go-github/v43/github/gists.go | 364 +
.../go-github/v43/github/gists_comments.go | 119 +
.../google/go-github/v43/github/git.go | 12 +
.../google/go-github/v43/github/git_blobs.go | 69 +
.../go-github/v43/github/git_commits.go | 200 +
.../google/go-github/v43/github/git_refs.go | 175 +
.../google/go-github/v43/github/git_tags.go | 76 +
.../google/go-github/v43/github/git_trees.go | 162 +
.../go-github/v43/github/github-accessors.go | 19733 ++
.../google/go-github/v43/github/github.go | 1296 +
.../google/go-github/v43/github/gitignore.go | 64 +
.../go-github/v43/github/interactions.go | 28 +
.../go-github/v43/github/interactions_orgs.go | 80 +
.../v43/github/interactions_repos.go | 80 +
.../go-github/v43/github/issue_import.go | 152 +
.../google/go-github/v43/github/issues.go | 358 +
.../go-github/v43/github/issues_assignees.go | 85 +
.../go-github/v43/github/issues_comments.go | 154 +
.../go-github/v43/github/issues_events.go | 179 +
.../go-github/v43/github/issues_labels.go | 231 +
.../go-github/v43/github/issues_milestones.go | 148 +
.../go-github/v43/github/issues_timeline.go | 184 +
.../google/go-github/v43/github/licenses.go | 97 +
.../google/go-github/v43/github/messages.go | 304 +
.../google/go-github/v43/github/migrations.go | 228 +
.../v43/github/migrations_source_import.go | 305 +
.../go-github/v43/github/migrations_user.go | 214 +
.../google/go-github/v43/github/misc.go | 271 +
.../google/go-github/v43/github/orgs.go | 272 +
.../v43/github/orgs_actions_allowed.go | 58 +
.../v43/github/orgs_actions_permissions.go | 58 +
.../go-github/v43/github/orgs_audit_log.go | 116 +
.../google/go-github/v43/github/orgs_hooks.go | 118 +
.../v43/github/orgs_hooks_deliveries.go | 73 +
.../go-github/v43/github/orgs_members.go | 388 +
.../v43/github/orgs_outside_collaborators.go | 81 +
.../go-github/v43/github/orgs_packages.go | 149 +
.../go-github/v43/github/orgs_projects.go | 60 +
.../v43/github/orgs_users_blocking.go | 91 +
.../google/go-github/v43/github/packages.go | 143 +
.../google/go-github/v43/github/projects.go | 596 +
.../google/go-github/v43/github/pulls.go | 486 +
.../go-github/v43/github/pulls_comments.go | 202 +
.../go-github/v43/github/pulls_reviewers.go | 80 +
.../go-github/v43/github/pulls_reviews.go | 312 +
.../google/go-github/v43/github/reactions.go | 520 +
.../google/go-github/v43/github/repos.go | 1692 +
.../go-github/v43/github/repos_autolinks.go | 102 +
.../v43/github/repos_collaborators.go | 154 +
.../go-github/v43/github/repos_comments.go | 162 +
.../go-github/v43/github/repos_commits.go | 312 +
.../v43/github/repos_community_health.go | 62 +
.../go-github/v43/github/repos_contents.go | 323 +
.../go-github/v43/github/repos_deployments.go | 250 +
.../v43/github/repos_environments.go | 201 +
.../go-github/v43/github/repos_forks.go | 96 +
.../go-github/v43/github/repos_hooks.go | 234 +
.../v43/github/repos_hooks_deliveries.go | 117 +
.../go-github/v43/github/repos_invitations.go | 89 +
.../google/go-github/v43/github/repos_keys.go | 91 +
.../go-github/v43/github/repos_merging.go | 38 +
.../go-github/v43/github/repos_pages.go | 240 +
.../v43/github/repos_prereceive_hooks.go | 110 +
.../go-github/v43/github/repos_projects.go | 69 +
.../go-github/v43/github/repos_releases.go | 441 +
.../go-github/v43/github/repos_stats.go | 226 +
.../go-github/v43/github/repos_statuses.go | 133 +
.../go-github/v43/github/repos_traffic.go | 141 +
.../google/go-github/v43/github/scim.go | 163 +
.../google/go-github/v43/github/search.go | 298 +
.../go-github/v43/github/secret_scanning.go | 234 +
.../google/go-github/v43/github/strings.go | 96 +
.../google/go-github/v43/github/teams.go | 970 +
.../v43/github/teams_discussion_comments.go | 242 +
.../go-github/v43/github/teams_discussions.go | 247 +
.../go-github/v43/github/teams_members.go | 243 +
.../google/go-github/v43/github/timestamp.go | 44 +
.../google/go-github/v43/github/users.go | 276 +
.../v43/github/users_administration.go | 72 +
.../go-github/v43/github/users_blocking.go | 91 +
.../go-github/v43/github/users_emails.go | 72 +
.../go-github/v43/github/users_followers.go | 122 +
.../go-github/v43/github/users_gpg_keys.go | 130 +
.../google/go-github/v43/github/users_keys.go | 111 +
.../go-github/v43/github/users_packages.go | 211 +
.../go-github/v43/github/users_projects.go | 68 +
.../go-github/v43/github/with_appengine.go | 21 +
.../go-github/v43/github/without_appengine.go | 20 +
.../github.com/google/go-querystring/LICENSE | 27 +
.../google/go-querystring/query/encode.go | 357 +
vendor/github.com/google/uuid/.travis.yml | 9 +
vendor/github.com/google/uuid/CONTRIBUTING.md | 10 +
vendor/github.com/google/uuid/CONTRIBUTORS | 9 +
vendor/github.com/google/uuid/LICENSE | 27 +
vendor/github.com/google/uuid/README.md | 19 +
vendor/github.com/google/uuid/dce.go | 80 +
vendor/github.com/google/uuid/doc.go | 12 +
vendor/github.com/google/uuid/hash.go | 53 +
vendor/github.com/google/uuid/marshal.go | 38 +
vendor/github.com/google/uuid/node.go | 90 +
vendor/github.com/google/uuid/node_js.go | 12 +
vendor/github.com/google/uuid/node_net.go | 33 +
vendor/github.com/google/uuid/null.go | 118 +
vendor/github.com/google/uuid/sql.go | 59 +
vendor/github.com/google/uuid/time.go | 123 +
vendor/github.com/google/uuid/util.go | 43 +
vendor/github.com/google/uuid/uuid.go | 294 +
vendor/github.com/google/uuid/version1.go | 44 +
vendor/github.com/google/uuid/version4.go | 76 +
vendor/github.com/gorilla/handlers/LICENSE | 22 +
vendor/github.com/gorilla/handlers/README.md | 56 +
.../github.com/gorilla/handlers/canonical.go | 74 +
.../github.com/gorilla/handlers/compress.go | 143 +
vendor/github.com/gorilla/handlers/cors.go | 355 +
vendor/github.com/gorilla/handlers/doc.go | 9 +
.../github.com/gorilla/handlers/handlers.go | 147 +
vendor/github.com/gorilla/handlers/logging.go | 244 +
.../gorilla/handlers/proxy_headers.go | 120 +
.../github.com/gorilla/handlers/recovery.go | 96 +
vendor/github.com/gorilla/mux/AUTHORS | 8 +
vendor/github.com/gorilla/mux/LICENSE | 27 +
vendor/github.com/gorilla/mux/README.md | 805 +
vendor/github.com/gorilla/mux/doc.go | 306 +
vendor/github.com/gorilla/mux/middleware.go | 74 +
vendor/github.com/gorilla/mux/mux.go | 606 +
vendor/github.com/gorilla/mux/regexp.go | 388 +
vendor/github.com/gorilla/mux/route.go | 736 +
vendor/github.com/gorilla/mux/test_helpers.go | 19 +
.../github.com/gorilla/websocket/.gitignore | 25 +
vendor/github.com/gorilla/websocket/AUTHORS | 9 +
vendor/github.com/gorilla/websocket/LICENSE | 22 +
vendor/github.com/gorilla/websocket/README.md | 39 +
vendor/github.com/gorilla/websocket/client.go | 422 +
.../gorilla/websocket/compression.go | 148 +
vendor/github.com/gorilla/websocket/conn.go | 1230 +
vendor/github.com/gorilla/websocket/doc.go | 227 +
vendor/github.com/gorilla/websocket/join.go | 42 +
vendor/github.com/gorilla/websocket/json.go | 60 +
vendor/github.com/gorilla/websocket/mask.go | 55 +
.../github.com/gorilla/websocket/mask_safe.go | 16 +
.../github.com/gorilla/websocket/prepared.go | 102 +
vendor/github.com/gorilla/websocket/proxy.go | 77 +
vendor/github.com/gorilla/websocket/server.go | 365 +
.../gorilla/websocket/tls_handshake.go | 21 +
.../gorilla/websocket/tls_handshake_116.go | 21 +
vendor/github.com/gorilla/websocket/util.go | 283 +
.../gorilla/websocket/x_net_proxy.go | 473 +
.../inconshreveable/mousetrap/LICENSE | 13 +
.../inconshreveable/mousetrap/README.md | 23 +
.../inconshreveable/mousetrap/trap_others.go | 15 +
.../inconshreveable/mousetrap/trap_windows.go | 98 +
.../mousetrap/trap_windows_1.4.go | 46 +
.../github.com/jedib0t/go-pretty/v6/LICENSE | 21 +
.../jedib0t/go-pretty/v6/table/README.md | 446 +
.../jedib0t/go-pretty/v6/table/config.go | 92 +
.../jedib0t/go-pretty/v6/table/render.go | 394 +
.../jedib0t/go-pretty/v6/table/render_csv.go | 81 +
.../jedib0t/go-pretty/v6/table/render_html.go | 244 +
.../go-pretty/v6/table/render_markdown.go | 113 +
.../jedib0t/go-pretty/v6/table/sort.go | 127 +
.../jedib0t/go-pretty/v6/table/style.go | 874 +
.../jedib0t/go-pretty/v6/table/table.go | 1000 +
.../jedib0t/go-pretty/v6/table/util.go | 41 +
.../jedib0t/go-pretty/v6/table/writer.go | 43 +
.../jedib0t/go-pretty/v6/text/align.go | 137 +
.../jedib0t/go-pretty/v6/text/ansi.go | 55 +
.../jedib0t/go-pretty/v6/text/ansi_unix.go | 7 +
.../jedib0t/go-pretty/v6/text/ansi_windows.go | 31 +
.../jedib0t/go-pretty/v6/text/color.go | 183 +
.../jedib0t/go-pretty/v6/text/color_html.go | 48 +
.../jedib0t/go-pretty/v6/text/cursor.go | 39 +
.../jedib0t/go-pretty/v6/text/filter.go | 12 +
.../jedib0t/go-pretty/v6/text/format.go | 100 +
.../jedib0t/go-pretty/v6/text/string.go | 204 +
.../jedib0t/go-pretty/v6/text/transformer.go | 202 +
.../jedib0t/go-pretty/v6/text/valign.go | 67 +
.../jedib0t/go-pretty/v6/text/wrap.go | 261 +
vendor/github.com/jinzhu/inflection/LICENSE | 21 +
vendor/github.com/jinzhu/inflection/README.md | 55 +
.../jinzhu/inflection/inflections.go | 273 +
.../github.com/jinzhu/inflection/wercker.yml | 23 +
vendor/github.com/jinzhu/now/Guardfile | 3 +
vendor/github.com/jinzhu/now/License | 21 +
vendor/github.com/jinzhu/now/README.md | 137 +
vendor/github.com/jinzhu/now/main.go | 200 +
vendor/github.com/jinzhu/now/now.go | 245 +
vendor/github.com/jinzhu/now/time.go | 9 +
vendor/github.com/juju/webbrowser/.gitignore | 24 +
vendor/github.com/juju/webbrowser/LICENSE | 165 +
vendor/github.com/juju/webbrowser/README.md | 2 +
.../github.com/juju/webbrowser/webbrowser.go | 61 +
.../julienschmidt/httprouter/.travis.yml | 18 +
.../julienschmidt/httprouter/LICENSE | 29 +
.../julienschmidt/httprouter/README.md | 300 +
.../julienschmidt/httprouter/path.go | 123 +
.../julienschmidt/httprouter/router.go | 452 +
.../julienschmidt/httprouter/tree.go | 666 +
.../github.com/kballard/go-shellquote/LICENSE | 19 +
.../github.com/kballard/go-shellquote/README | 36 +
.../github.com/kballard/go-shellquote/doc.go | 3 +
.../kballard/go-shellquote/quote.go | 102 +
.../kballard/go-shellquote/unquote.go | 156 +
vendor/github.com/kr/fs/LICENSE | 27 +
vendor/github.com/kr/fs/Readme | 3 +
vendor/github.com/kr/fs/filesystem.go | 36 +
vendor/github.com/kr/fs/walk.go | 95 +
vendor/github.com/lxc/lxd/AUTHORS | 5 +
vendor/github.com/lxc/lxd/COPYING | 202 +
.../github.com/lxc/lxd/client/connection.go | 353 +
vendor/github.com/lxc/lxd/client/doc.go | 146 +
vendor/github.com/lxc/lxd/client/events.go | 113 +
.../github.com/lxc/lxd/client/interfaces.go | 649 +
.../lxc/lxd/client/interfaces_legacy.go | 126 +
vendor/github.com/lxc/lxd/client/lxd.go | 463 +
.../lxc/lxd/client/lxd_certificates.go | 106 +
.../github.com/lxc/lxd/client/lxd_cluster.go | 293 +
.../lxc/lxd/client/lxd_containers.go | 1784 +
.../github.com/lxc/lxd/client/lxd_events.go | 192 +
.../github.com/lxc/lxd/client/lxd_images.go | 932 +
.../lxc/lxd/client/lxd_instances.go | 2541 +
.../lxc/lxd/client/lxd_network_acls.go | 159 +
.../lxc/lxd/client/lxd_network_forwards.go | 105 +
.../lxc/lxd/client/lxd_network_peer.go | 106 +
.../lxc/lxd/client/lxd_network_zones.go | 202 +
.../github.com/lxc/lxd/client/lxd_networks.go | 154 +
.../lxc/lxd/client/lxd_operations.go | 103 +
.../github.com/lxc/lxd/client/lxd_profiles.go | 94 +
.../github.com/lxc/lxd/client/lxd_projects.go | 139 +
.../github.com/lxc/lxd/client/lxd_server.go | 183 +
.../lxc/lxd/client/lxd_storage_pools.go | 128 +
.../lxc/lxd/client/lxd_storage_volumes.go | 875 +
.../github.com/lxc/lxd/client/lxd_warnings.go | 90 +
.../github.com/lxc/lxd/client/operations.go | 325 +
.../lxc/lxd/client/simplestreams.go | 52 +
.../lxc/lxd/client/simplestreams_images.go | 284 +
vendor/github.com/lxc/lxd/client/util.go | 205 +
.../lxc/lxd/lxd/device/config/consts.go | 4 +
.../lxd/device/config/device_proxyaddress.go | 9 +
.../lxd/lxd/device/config/device_runconfig.go | 70 +
.../lxc/lxd/lxd/device/config/devices.go | 202 +
.../lxc/lxd/lxd/device/config/devices_sort.go | 71 +
.../lxd/lxd/device/config/devices_utils.go | 37 +
.../github.com/lxc/lxd/lxd/include/compiler.h | 83 +
.../github.com/lxc/lxd/lxd/include/config.h | 8 +
.../github.com/lxc/lxd/lxd/include/dummy.go | 1 +
.../lxc/lxd/lxd/include/error_utils.h | 69 +
.../lxc/lxd/lxd/include/file_utils.h | 33 +
vendor/github.com/lxc/lxd/lxd/include/lxd.h | 27 +
.../github.com/lxc/lxd/lxd/include/lxd_bpf.h | 4264 +
.../lxc/lxd/lxd/include/lxd_bpf_common.h | 57 +
.../lxc/lxd/lxd/include/lxd_posix_acl_xattr.h | 39 +
.../lxc/lxd/lxd/include/lxd_seccomp.h | 91 +
vendor/github.com/lxc/lxd/lxd/include/macro.h | 341 +
.../lxc/lxd/lxd/include/memory_utils.h | 67 +
.../lxc/lxd/lxd/include/mount_utils.h | 63 +
.../lxc/lxd/lxd/include/process_utils.h | 110 +
.../lxc/lxd/lxd/include/syscall_numbers.h | 246 +
.../lxc/lxd/lxd/include/syscall_wrappers.h | 164 +
.../instance/instancetype/instance_type.go | 52 +
.../instance/instancetype/instance_vmagent.go | 22 +
.../github.com/lxc/lxd/lxd/revert/revert.go | 47 +
.../lxc/lxd/shared/api/certificate.go | 118 +
.../github.com/lxc/lxd/shared/api/cluster.go | 301 +
.../lxc/lxd/shared/api/container.go | 141 +
.../lxc/lxd/shared/api/container_backup.go | 29 +
.../lxc/lxd/shared/api/container_console.go | 17 +
.../lxc/lxd/shared/api/container_exec.go | 26 +
.../lxc/lxd/shared/api/container_snapshot.go | 53 +
.../lxc/lxd/shared/api/container_state.go | 70 +
vendor/github.com/lxc/lxd/shared/api/doc.go | 13 +
vendor/github.com/lxc/lxd/shared/api/error.go | 72 +
vendor/github.com/lxc/lxd/shared/api/event.go | 155 +
vendor/github.com/lxc/lxd/shared/api/image.go | 312 +
.../github.com/lxc/lxd/shared/api/instance.go | 332 +
.../lxc/lxd/shared/api/instance_backup.go | 78 +
.../lxc/lxd/shared/api/instance_console.go | 30 +
.../lxc/lxd/shared/api/instance_exec.go | 56 +
.../lxc/lxd/shared/api/instance_snapshot.go | 126 +
.../lxc/lxd/shared/api/instance_state.go | 201 +
.../github.com/lxc/lxd/shared/api/network.go | 318 +
.../lxc/lxd/shared/api/network_acl.go | 151 +
.../lxc/lxd/shared/api/network_forward.go | 141 +
.../lxc/lxd/shared/api/network_peer.go | 81 +
.../lxc/lxd/shared/api/network_zone.go | 120 +
.../lxc/lxd/shared/api/operation.go | 157 +
.../github.com/lxc/lxd/shared/api/profile.go | 62 +
.../github.com/lxc/lxd/shared/api/project.go | 98 +
.../github.com/lxc/lxd/shared/api/resource.go | 1049 +
.../github.com/lxc/lxd/shared/api/response.go | 90 +
.../github.com/lxc/lxd/shared/api/server.go | 206 +
.../lxc/lxd/shared/api/status_code.go | 53 +
.../lxc/lxd/shared/api/storage_pool.go | 91 +
.../lxc/lxd/shared/api/storage_pool_volume.go | 210 +
.../shared/api/storage_pool_volume_backup.go | 68 +
.../api/storage_pool_volume_snapshot.go | 76 +
.../shared/api/storage_pool_volume_state.go | 28 +
vendor/github.com/lxc/lxd/shared/api/url.go | 76 +
.../github.com/lxc/lxd/shared/api/warning.go | 63 +
vendor/github.com/lxc/lxd/shared/archive.go | 59 +
.../lxc/lxd/shared/cancel/canceler.go | 78 +
vendor/github.com/lxc/lxd/shared/cert.go | 505 +
vendor/github.com/lxc/lxd/shared/cgo.go | 12 +
vendor/github.com/lxc/lxd/shared/instance.go | 414 +
.../lxc/lxd/shared/ioprogress/data.go | 16 +
.../lxc/lxd/shared/ioprogress/reader.go | 25 +
.../lxc/lxd/shared/ioprogress/tracker.go | 77 +
.../lxc/lxd/shared/ioprogress/writer.go | 25 +
vendor/github.com/lxc/lxd/shared/json.go | 47 +
.../lxc/lxd/shared/logger/format.go | 25 +
.../github.com/lxc/lxd/shared/logger/log.go | 73 +
.../lxc/lxd/shared/logger/syslog_linux.go | 20 +
.../lxc/lxd/shared/logger/syslog_other.go | 13 +
.../lxc/lxd/shared/logger/toplevel.go | 70 +
.../github.com/lxc/lxd/shared/logger/types.go | 35 +
.../lxc/lxd/shared/logger/wrapper.go | 55 +
vendor/github.com/lxc/lxd/shared/network.go | 519 +
.../github.com/lxc/lxd/shared/network_ip.go | 30 +
.../github.com/lxc/lxd/shared/network_unix.go | 26 +
.../lxc/lxd/shared/network_windows.go | 60 +
.../lxc/lxd/shared/osarch/architectures.go | 149 +
.../lxd/shared/osarch/architectures_linux.go | 20 +
.../lxd/shared/osarch/architectures_others.go | 7 +
.../lxc/lxd/shared/osarch/release.go | 50 +
vendor/github.com/lxc/lxd/shared/proxy.go | 169 +
.../lxc/lxd/shared/simplestreams/index.go | 17 +
.../lxc/lxd/shared/simplestreams/products.go | 289 +
.../lxd/shared/simplestreams/simplestreams.go | 499 +
.../lxc/lxd/shared/simplestreams/sort.go | 125 +
.../lxc/lxd/shared/tcp/tcp_timeout_user.go | 28 +
.../lxd/shared/tcp/tcp_timeout_user_noop.go | 14 +
.../lxc/lxd/shared/tcp/tcp_timeouts.go | 65 +
.../lxc/lxd/shared/termios/termios.go | 1 +
.../lxc/lxd/shared/termios/termios_linux.go | 79 +
.../lxc/lxd/shared/termios/termios_other.go | 49 +
.../github.com/lxc/lxd/shared/units/units.go | 194 +
vendor/github.com/lxc/lxd/shared/util.go | 1247 +
.../github.com/lxc/lxd/shared/util_linux.go | 671 +
.../lxc/lxd/shared/util_linux_cgo.go | 115 +
.../lxc/lxd/shared/util_linux_notcgo.go | 5 +
vendor/github.com/lxc/lxd/shared/util_unix.go | 15 +
.../github.com/lxc/lxd/shared/util_windows.go | 11 +
.../lxc/lxd/shared/validate/validate.go | 860 +
.../github.com/manifoldco/promptui/.gitignore | 3 +
.../manifoldco/promptui/.golangci.yml | 26 +
.../manifoldco/promptui/.travis.yml | 14 +
.../manifoldco/promptui/CHANGELOG.md | 130 +
.../manifoldco/promptui/CODE_OF_CONDUCT.md | 73 +
.../github.com/manifoldco/promptui/LICENSE.md | 29 +
.../github.com/manifoldco/promptui/Makefile | 49 +
.../github.com/manifoldco/promptui/README.md | 107 +
.../github.com/manifoldco/promptui/codes.go | 120 +
.../github.com/manifoldco/promptui/cursor.go | 232 +
.../manifoldco/promptui/keycodes.go | 29 +
.../manifoldco/promptui/keycodes_other.go | 10 +
.../manifoldco/promptui/keycodes_windows.go | 10 +
.../manifoldco/promptui/list/list.go | 237 +
.../github.com/manifoldco/promptui/prompt.go | 341 +
.../manifoldco/promptui/promptui.go | 27 +
.../promptui/screenbuf/screenbuf.go | 151 +
.../github.com/manifoldco/promptui/select.go | 638 +
.../github.com/manifoldco/promptui/styles.go | 23 +
.../manifoldco/promptui/styles_windows.go | 21 +
.../github.com/mattn/go-runewidth/.travis.yml | 16 +
vendor/github.com/mattn/go-runewidth/LICENSE | 21 +
.../github.com/mattn/go-runewidth/README.md | 27 +
.../github.com/mattn/go-runewidth/go.test.sh | 12 +
.../mattn/go-runewidth/runewidth.go | 273 +
.../mattn/go-runewidth/runewidth_appengine.go | 8 +
.../mattn/go-runewidth/runewidth_js.go | 9 +
.../mattn/go-runewidth/runewidth_posix.go | 82 +
.../mattn/go-runewidth/runewidth_table.go | 439 +
.../mattn/go-runewidth/runewidth_windows.go | 28 +
.../github.com/mattn/go-sqlite3/.codecov.yml | 4 +
vendor/github.com/mattn/go-sqlite3/.gitignore | 14 +
vendor/github.com/mattn/go-sqlite3/LICENSE | 21 +
vendor/github.com/mattn/go-sqlite3/README.md | 591 +
vendor/github.com/mattn/go-sqlite3/backup.go | 85 +
.../github.com/mattn/go-sqlite3/callback.go | 392 +
vendor/github.com/mattn/go-sqlite3/convert.go | 299 +
vendor/github.com/mattn/go-sqlite3/doc.go | 135 +
vendor/github.com/mattn/go-sqlite3/error.go | 150 +
.../mattn/go-sqlite3/sqlite3-binding.c | 239606 +++++++++++++++
.../mattn/go-sqlite3/sqlite3-binding.h | 12903 +
vendor/github.com/mattn/go-sqlite3/sqlite3.go | 2266 +
.../mattn/go-sqlite3/sqlite3_context.go | 103 +
.../mattn/go-sqlite3/sqlite3_func_crypt.go | 120 +
.../mattn/go-sqlite3/sqlite3_go18.go | 70 +
.../mattn/go-sqlite3/sqlite3_libsqlite3.go | 19 +
.../go-sqlite3/sqlite3_load_extension.go | 84 +
.../go-sqlite3/sqlite3_load_extension_omit.go | 24 +
.../sqlite3_opt_allow_uri_authority.go | 15 +
.../mattn/go-sqlite3/sqlite3_opt_app_armor.go | 16 +
.../go-sqlite3/sqlite3_opt_column_metadata.go | 21 +
.../go-sqlite3/sqlite3_opt_foreign_keys.go | 15 +
.../mattn/go-sqlite3/sqlite3_opt_fts5.go | 14 +
.../mattn/go-sqlite3/sqlite3_opt_icu.go | 17 +
.../go-sqlite3/sqlite3_opt_introspect.go | 15 +
.../mattn/go-sqlite3/sqlite3_opt_json1.go | 13 +
.../mattn/go-sqlite3/sqlite3_opt_preupdate.go | 20 +
.../go-sqlite3/sqlite3_opt_preupdate_hook.go | 112 +
.../go-sqlite3/sqlite3_opt_preupdate_omit.go | 21 +
.../go-sqlite3/sqlite3_opt_secure_delete.go | 15 +
.../sqlite3_opt_secure_delete_fast.go | 15 +
.../mattn/go-sqlite3/sqlite3_opt_stat4.go | 15 +
.../go-sqlite3/sqlite3_opt_unlock_notify.c | 85 +
.../go-sqlite3/sqlite3_opt_unlock_notify.go | 93 +
.../mattn/go-sqlite3/sqlite3_opt_userauth.go | 289 +
.../go-sqlite3/sqlite3_opt_userauth_omit.go | 152 +
.../go-sqlite3/sqlite3_opt_vacuum_full.go | 15 +
.../go-sqlite3/sqlite3_opt_vacuum_incr.go | 15 +
.../mattn/go-sqlite3/sqlite3_opt_vtable.go | 720 +
.../mattn/go-sqlite3/sqlite3_other.go | 17 +
.../mattn/go-sqlite3/sqlite3_solaris.go | 14 +
.../mattn/go-sqlite3/sqlite3_trace.go | 287 +
.../mattn/go-sqlite3/sqlite3_type.go | 108 +
.../go-sqlite3/sqlite3_usleep_windows.go | 41 +
.../mattn/go-sqlite3/sqlite3_windows.go | 18 +
.../github.com/mattn/go-sqlite3/sqlite3ext.h | 698 +
.../mattn/go-sqlite3/static_mock.go | 37 +
.../github.com/nbutton23/zxcvbn-go/.gitignore | 2 +
.../nbutton23/zxcvbn-go/LICENSE.txt | 20 +
.../github.com/nbutton23/zxcvbn-go/Makefile | 15 +
.../github.com/nbutton23/zxcvbn-go/README.md | 78 +
.../zxcvbn-go/adjacency/adjcmartix.go | 108 +
.../nbutton23/zxcvbn-go/data/bindata.go | 444 +
.../zxcvbn-go/entropy/entropyCalculator.go | 216 +
.../zxcvbn-go/frequency/frequency.go | 50 +
.../nbutton23/zxcvbn-go/match/match.go | 44 +
.../zxcvbn-go/matching/dateMatchers.go | 209 +
.../zxcvbn-go/matching/dictionaryMatch.go | 57 +
.../nbutton23/zxcvbn-go/matching/leet.go | 234 +
.../nbutton23/zxcvbn-go/matching/matching.go | 82 +
.../zxcvbn-go/matching/repeatMatch.go | 67 +
.../zxcvbn-go/matching/sequenceMatch.go | 76 +
.../zxcvbn-go/matching/spatialMatch.go | 88 +
.../nbutton23/zxcvbn-go/scoring/scoring.go | 177 +
.../zxcvbn-go/utils/math/mathutils.go | 40 +
.../github.com/nbutton23/zxcvbn-go/zxcvbn.go | 22 +
vendor/github.com/pborman/uuid/.travis.yml | 10 +
.../github.com/pborman/uuid/CONTRIBUTING.md | 10 +
vendor/github.com/pborman/uuid/CONTRIBUTORS | 1 +
vendor/github.com/pborman/uuid/LICENSE | 27 +
vendor/github.com/pborman/uuid/README.md | 15 +
vendor/github.com/pborman/uuid/dce.go | 84 +
vendor/github.com/pborman/uuid/doc.go | 13 +
vendor/github.com/pborman/uuid/hash.go | 53 +
vendor/github.com/pborman/uuid/marshal.go | 85 +
vendor/github.com/pborman/uuid/node.go | 50 +
vendor/github.com/pborman/uuid/sql.go | 68 +
vendor/github.com/pborman/uuid/time.go | 57 +
vendor/github.com/pborman/uuid/util.go | 32 +
vendor/github.com/pborman/uuid/uuid.go | 162 +
vendor/github.com/pborman/uuid/version1.go | 23 +
vendor/github.com/pborman/uuid/version4.go | 26 +
vendor/github.com/pkg/errors/.gitignore | 24 +
vendor/github.com/pkg/errors/.travis.yml | 10 +
vendor/github.com/pkg/errors/LICENSE | 23 +
vendor/github.com/pkg/errors/Makefile | 44 +
vendor/github.com/pkg/errors/README.md | 59 +
vendor/github.com/pkg/errors/appveyor.yml | 32 +
vendor/github.com/pkg/errors/errors.go | 288 +
vendor/github.com/pkg/errors/go113.go | 38 +
vendor/github.com/pkg/errors/stack.go | 177 +
vendor/github.com/pkg/sftp/.gitignore | 10 +
vendor/github.com/pkg/sftp/CONTRIBUTORS | 3 +
vendor/github.com/pkg/sftp/LICENSE | 9 +
vendor/github.com/pkg/sftp/Makefile | 27 +
vendor/github.com/pkg/sftp/README.md | 44 +
vendor/github.com/pkg/sftp/allocator.go | 96 +
vendor/github.com/pkg/sftp/attrs.go | 90 +
vendor/github.com/pkg/sftp/attrs_stubs.go | 11 +
vendor/github.com/pkg/sftp/attrs_unix.go | 16 +
vendor/github.com/pkg/sftp/client.go | 1936 +
vendor/github.com/pkg/sftp/conn.go | 189 +
vendor/github.com/pkg/sftp/debug.go | 9 +
vendor/github.com/pkg/sftp/fuzz.go | 22 +
.../internal/encoding/ssh/filexfer/attrs.go | 326 +
.../internal/encoding/ssh/filexfer/buffer.go | 293 +
.../encoding/ssh/filexfer/extended_packets.go | 142 +
.../encoding/ssh/filexfer/extensions.go | 46 +
.../encoding/ssh/filexfer/filexfer.go | 54 +
.../sftp/internal/encoding/ssh/filexfer/fx.go | 147 +
.../internal/encoding/ssh/filexfer/fxp.go | 124 +
.../encoding/ssh/filexfer/handle_packets.go | 249 +
.../encoding/ssh/filexfer/init_packets.go | 99 +
.../encoding/ssh/filexfer/open_packets.go | 89 +
.../internal/encoding/ssh/filexfer/packets.go | 323 +
.../encoding/ssh/filexfer/path_packets.go | 368 +
.../encoding/ssh/filexfer/permissions.go | 114 +
.../encoding/ssh/filexfer/response_packets.go | 243 +
vendor/github.com/pkg/sftp/ls_formatting.go | 81 +
vendor/github.com/pkg/sftp/ls_plan9.go | 21 +
vendor/github.com/pkg/sftp/ls_stub.go | 11 +
vendor/github.com/pkg/sftp/ls_unix.go | 23 +
vendor/github.com/pkg/sftp/match.go | 137 +
vendor/github.com/pkg/sftp/packet-manager.go | 216 +
vendor/github.com/pkg/sftp/packet-typing.go | 135 +
vendor/github.com/pkg/sftp/packet.go | 1276 +
vendor/github.com/pkg/sftp/pool.go | 79 +
vendor/github.com/pkg/sftp/release.go | 5 +
vendor/github.com/pkg/sftp/request-attrs.go | 63 +
vendor/github.com/pkg/sftp/request-errors.go | 54 +
vendor/github.com/pkg/sftp/request-example.go | 666 +
.../github.com/pkg/sftp/request-interfaces.go | 121 +
vendor/github.com/pkg/sftp/request-plan9.go | 34 +
vendor/github.com/pkg/sftp/request-readme.md | 53 +
vendor/github.com/pkg/sftp/request-server.go | 304 +
vendor/github.com/pkg/sftp/request-unix.go | 27 +
vendor/github.com/pkg/sftp/request.go | 628 +
vendor/github.com/pkg/sftp/request_windows.go | 44 +
vendor/github.com/pkg/sftp/server.go | 616 +
.../pkg/sftp/server_statvfs_darwin.go | 21 +
.../pkg/sftp/server_statvfs_impl.go | 29 +
.../pkg/sftp/server_statvfs_linux.go | 22 +
.../pkg/sftp/server_statvfs_plan9.go | 13 +
.../pkg/sftp/server_statvfs_stubs.go | 15 +
vendor/github.com/pkg/sftp/sftp.go | 258 +
vendor/github.com/pkg/sftp/stat_plan9.go | 103 +
vendor/github.com/pkg/sftp/stat_posix.go | 123 +
vendor/github.com/pkg/sftp/syscall_fixed.go | 9 +
vendor/github.com/pkg/sftp/syscall_good.go | 8 +
vendor/github.com/pmezard/go-difflib/LICENSE | 27 +
.../pmezard/go-difflib/difflib/difflib.go | 772 +
vendor/github.com/rivo/uniseg/LICENSE.txt | 21 +
vendor/github.com/rivo/uniseg/README.md | 62 +
vendor/github.com/rivo/uniseg/doc.go | 8 +
vendor/github.com/rivo/uniseg/grapheme.go | 268 +
vendor/github.com/rivo/uniseg/properties.go | 1658 +
vendor/github.com/robfig/cron/v3/.gitignore | 22 +
vendor/github.com/robfig/cron/v3/.travis.yml | 1 +
vendor/github.com/robfig/cron/v3/LICENSE | 21 +
vendor/github.com/robfig/cron/v3/README.md | 125 +
vendor/github.com/robfig/cron/v3/chain.go | 92 +
.../robfig/cron/v3/constantdelay.go | 27 +
vendor/github.com/robfig/cron/v3/cron.go | 355 +
vendor/github.com/robfig/cron/v3/doc.go | 231 +
vendor/github.com/robfig/cron/v3/logger.go | 86 +
vendor/github.com/robfig/cron/v3/option.go | 45 +
vendor/github.com/robfig/cron/v3/parser.go | 434 +
vendor/github.com/robfig/cron/v3/spec.go | 188 +
vendor/github.com/rogpeppe/fastuuid/LICENSE | 26 +
vendor/github.com/rogpeppe/fastuuid/README.md | 95 +
vendor/github.com/rogpeppe/fastuuid/uuid.go | 146 +
vendor/github.com/satori/go.uuid/.travis.yml | 21 +
vendor/github.com/satori/go.uuid/LICENSE | 20 +
vendor/github.com/satori/go.uuid/README.md | 75 +
vendor/github.com/satori/go.uuid/codec.go | 206 +
vendor/github.com/satori/go.uuid/generator.go | 265 +
vendor/github.com/satori/go.uuid/sql.go | 78 +
vendor/github.com/satori/go.uuid/uuid.go | 161 +
vendor/github.com/sirupsen/logrus/.gitignore | 4 +
.../github.com/sirupsen/logrus/.golangci.yml | 40 +
vendor/github.com/sirupsen/logrus/.travis.yml | 15 +
.../github.com/sirupsen/logrus/CHANGELOG.md | 259 +
vendor/github.com/sirupsen/logrus/LICENSE | 21 +
vendor/github.com/sirupsen/logrus/README.md | 513 +
vendor/github.com/sirupsen/logrus/alt_exit.go | 76 +
.../github.com/sirupsen/logrus/appveyor.yml | 14 +
.../github.com/sirupsen/logrus/buffer_pool.go | 52 +
vendor/github.com/sirupsen/logrus/doc.go | 26 +
vendor/github.com/sirupsen/logrus/entry.go | 431 +
vendor/github.com/sirupsen/logrus/exported.go | 270 +
.../github.com/sirupsen/logrus/formatter.go | 78 +
vendor/github.com/sirupsen/logrus/hooks.go | 34 +
.../sirupsen/logrus/hooks/syslog/README.md | 39 +
.../sirupsen/logrus/hooks/syslog/syslog.go | 55 +
.../sirupsen/logrus/hooks/writer/README.md | 43 +
.../sirupsen/logrus/hooks/writer/writer.go | 29 +
.../sirupsen/logrus/json_formatter.go | 128 +
vendor/github.com/sirupsen/logrus/logger.go | 404 +
vendor/github.com/sirupsen/logrus/logrus.go | 186 +
.../logrus/terminal_check_appengine.go | 11 +
.../sirupsen/logrus/terminal_check_bsd.go | 13 +
.../sirupsen/logrus/terminal_check_js.go | 7 +
.../logrus/terminal_check_no_terminal.go | 11 +
.../logrus/terminal_check_notappengine.go | 17 +
.../sirupsen/logrus/terminal_check_solaris.go | 11 +
.../sirupsen/logrus/terminal_check_unix.go | 13 +
.../sirupsen/logrus/terminal_check_windows.go | 27 +
.../sirupsen/logrus/text_formatter.go | 339 +
vendor/github.com/sirupsen/logrus/writer.go | 70 +
vendor/github.com/spf13/cobra/.gitignore | 39 +
vendor/github.com/spf13/cobra/.golangci.yml | 48 +
vendor/github.com/spf13/cobra/.mailmap | 3 +
vendor/github.com/spf13/cobra/CONDUCT.md | 37 +
vendor/github.com/spf13/cobra/CONTRIBUTING.md | 50 +
vendor/github.com/spf13/cobra/LICENSE.txt | 174 +
vendor/github.com/spf13/cobra/MAINTAINERS | 13 +
vendor/github.com/spf13/cobra/Makefile | 35 +
vendor/github.com/spf13/cobra/README.md | 111 +
vendor/github.com/spf13/cobra/args.go | 121 +
.../spf13/cobra/bash_completions.go | 697 +
.../spf13/cobra/bash_completions.md | 93 +
.../spf13/cobra/bash_completionsV2.go | 327 +
vendor/github.com/spf13/cobra/cobra.go | 222 +
vendor/github.com/spf13/cobra/command.go | 1695 +
.../github.com/spf13/cobra/command_notwin.go | 6 +
vendor/github.com/spf13/cobra/command_win.go | 27 +
vendor/github.com/spf13/cobra/completions.go | 823 +
.../spf13/cobra/fish_completions.go | 219 +
.../spf13/cobra/fish_completions.md | 4 +
vendor/github.com/spf13/cobra/flag_groups.go | 174 +
.../spf13/cobra/powershell_completions.go | 285 +
.../spf13/cobra/powershell_completions.md | 3 +
.../spf13/cobra/projects_using_cobra.md | 53 +
.../spf13/cobra/shell_completions.go | 84 +
.../spf13/cobra/shell_completions.md | 548 +
vendor/github.com/spf13/cobra/user_guide.md | 662 +
.../github.com/spf13/cobra/zsh_completions.go | 258 +
.../github.com/spf13/cobra/zsh_completions.md | 48 +
vendor/github.com/spf13/pflag/.gitignore | 2 +
vendor/github.com/spf13/pflag/.travis.yml | 22 +
vendor/github.com/spf13/pflag/LICENSE | 28 +
vendor/github.com/spf13/pflag/README.md | 296 +
vendor/github.com/spf13/pflag/bool.go | 94 +
vendor/github.com/spf13/pflag/bool_slice.go | 185 +
vendor/github.com/spf13/pflag/bytes.go | 209 +
vendor/github.com/spf13/pflag/count.go | 96 +
vendor/github.com/spf13/pflag/duration.go | 86 +
.../github.com/spf13/pflag/duration_slice.go | 166 +
vendor/github.com/spf13/pflag/flag.go | 1239 +
vendor/github.com/spf13/pflag/float32.go | 88 +
.../github.com/spf13/pflag/float32_slice.go | 174 +
vendor/github.com/spf13/pflag/float64.go | 84 +
.../github.com/spf13/pflag/float64_slice.go | 166 +
vendor/github.com/spf13/pflag/golangflag.go | 105 +
vendor/github.com/spf13/pflag/int.go | 84 +
vendor/github.com/spf13/pflag/int16.go | 88 +
vendor/github.com/spf13/pflag/int32.go | 88 +
vendor/github.com/spf13/pflag/int32_slice.go | 174 +
vendor/github.com/spf13/pflag/int64.go | 84 +
vendor/github.com/spf13/pflag/int64_slice.go | 166 +
vendor/github.com/spf13/pflag/int8.go | 88 +
vendor/github.com/spf13/pflag/int_slice.go | 158 +
vendor/github.com/spf13/pflag/ip.go | 94 +
vendor/github.com/spf13/pflag/ip_slice.go | 186 +
vendor/github.com/spf13/pflag/ipmask.go | 122 +
vendor/github.com/spf13/pflag/ipnet.go | 98 +
vendor/github.com/spf13/pflag/string.go | 80 +
vendor/github.com/spf13/pflag/string_array.go | 129 +
vendor/github.com/spf13/pflag/string_slice.go | 163 +
.../github.com/spf13/pflag/string_to_int.go | 149 +
.../github.com/spf13/pflag/string_to_int64.go | 149 +
.../spf13/pflag/string_to_string.go | 160 +
vendor/github.com/spf13/pflag/uint.go | 88 +
vendor/github.com/spf13/pflag/uint16.go | 88 +
vendor/github.com/spf13/pflag/uint32.go | 88 +
vendor/github.com/spf13/pflag/uint64.go | 88 +
vendor/github.com/spf13/pflag/uint8.go | 88 +
vendor/github.com/spf13/pflag/uint_slice.go | 168 +
vendor/github.com/stretchr/testify/LICENSE | 21 +
.../testify/assert/assertion_compare.go | 458 +
.../assert/assertion_compare_can_convert.go | 16 +
.../assert/assertion_compare_legacy.go | 16 +
.../testify/assert/assertion_format.go | 753 +
.../testify/assert/assertion_format.go.tmpl | 5 +
.../testify/assert/assertion_forward.go | 1494 +
.../testify/assert/assertion_forward.go.tmpl | 5 +
.../testify/assert/assertion_order.go | 81 +
.../stretchr/testify/assert/assertions.go | 1811 +
.../github.com/stretchr/testify/assert/doc.go | 45 +
.../stretchr/testify/assert/errors.go | 10 +
.../testify/assert/forward_assertions.go | 16 +
.../testify/assert/http_assertions.go | 162 +
vendor/golang.org/x/crypto/AUTHORS | 3 +
vendor/golang.org/x/crypto/CONTRIBUTORS | 3 +
vendor/golang.org/x/crypto/LICENSE | 27 +
vendor/golang.org/x/crypto/PATENTS | 22 +
vendor/golang.org/x/crypto/bcrypt/base64.go | 35 +
vendor/golang.org/x/crypto/bcrypt/bcrypt.go | 295 +
vendor/golang.org/x/crypto/blake2b/blake2b.go | 291 +
.../x/crypto/blake2b/blake2bAVX2_amd64.go | 38 +
.../x/crypto/blake2b/blake2bAVX2_amd64.s | 745 +
.../x/crypto/blake2b/blake2b_amd64.go | 25 +
.../x/crypto/blake2b/blake2b_amd64.s | 279 +
.../x/crypto/blake2b/blake2b_generic.go | 182 +
.../x/crypto/blake2b/blake2b_ref.go | 12 +
vendor/golang.org/x/crypto/blake2b/blake2x.go | 177 +
.../golang.org/x/crypto/blake2b/register.go | 33 +
vendor/golang.org/x/crypto/blowfish/block.go | 159 +
vendor/golang.org/x/crypto/blowfish/cipher.go | 99 +
vendor/golang.org/x/crypto/blowfish/const.go | 199 +
vendor/golang.org/x/crypto/cast5/cast5.go | 533 +
.../x/crypto/chacha20/chacha_arm64.go | 17 +
.../x/crypto/chacha20/chacha_arm64.s | 308 +
.../x/crypto/chacha20/chacha_generic.go | 398 +
.../x/crypto/chacha20/chacha_noasm.go | 14 +
.../x/crypto/chacha20/chacha_ppc64le.go | 17 +
.../x/crypto/chacha20/chacha_ppc64le.s | 450 +
.../x/crypto/chacha20/chacha_s390x.go | 27 +
.../x/crypto/chacha20/chacha_s390x.s | 225 +
vendor/golang.org/x/crypto/chacha20/xor.go | 42 +
.../x/crypto/curve25519/curve25519.go | 145 +
.../x/crypto/curve25519/internal/field/README | 7 +
.../x/crypto/curve25519/internal/field/fe.go | 416 +
.../curve25519/internal/field/fe_amd64.go | 13 +
.../curve25519/internal/field/fe_amd64.s | 379 +
.../internal/field/fe_amd64_noasm.go | 12 +
.../curve25519/internal/field/fe_arm64.go | 16 +
.../curve25519/internal/field/fe_arm64.s | 43 +
.../internal/field/fe_arm64_noasm.go | 12 +
.../curve25519/internal/field/fe_generic.go | 264 +
.../curve25519/internal/field/sync.checkpoint | 1 +
.../crypto/curve25519/internal/field/sync.sh | 19 +
vendor/golang.org/x/crypto/ed25519/ed25519.go | 71 +
.../x/crypto/internal/poly1305/bits_compat.go | 40 +
.../x/crypto/internal/poly1305/bits_go1.13.go | 22 +
.../x/crypto/internal/poly1305/mac_noasm.go | 10 +
.../x/crypto/internal/poly1305/poly1305.go | 99 +
.../x/crypto/internal/poly1305/sum_amd64.go | 48 +
.../x/crypto/internal/poly1305/sum_amd64.s | 109 +
.../x/crypto/internal/poly1305/sum_generic.go | 310 +
.../x/crypto/internal/poly1305/sum_ppc64le.go | 48 +
.../x/crypto/internal/poly1305/sum_ppc64le.s | 182 +
.../x/crypto/internal/poly1305/sum_s390x.go | 76 +
.../x/crypto/internal/poly1305/sum_s390x.s | 504 +
.../x/crypto/internal/subtle/aliasing.go | 33 +
.../crypto/internal/subtle/aliasing_purego.go | 36 +
vendor/golang.org/x/crypto/nacl/box/box.go | 182 +
.../x/crypto/nacl/secretbox/secretbox.go | 173 +
.../x/crypto/openpgp/armor/armor.go | 230 +
.../x/crypto/openpgp/armor/encode.go | 160 +
.../x/crypto/openpgp/canonical_text.go | 59 +
.../x/crypto/openpgp/elgamal/elgamal.go | 130 +
.../x/crypto/openpgp/errors/errors.go | 78 +
vendor/golang.org/x/crypto/openpgp/keys.go | 693 +
.../x/crypto/openpgp/packet/compressed.go | 123 +
.../x/crypto/openpgp/packet/config.go | 91 +
.../x/crypto/openpgp/packet/encrypted_key.go | 208 +
.../x/crypto/openpgp/packet/literal.go | 89 +
.../x/crypto/openpgp/packet/ocfb.go | 143 +
.../openpgp/packet/one_pass_signature.go | 73 +
.../x/crypto/openpgp/packet/opaque.go | 162 +
.../x/crypto/openpgp/packet/packet.go | 590 +
.../x/crypto/openpgp/packet/private_key.go | 385 +
.../x/crypto/openpgp/packet/public_key.go | 753 +
.../x/crypto/openpgp/packet/public_key_v3.go | 279 +
.../x/crypto/openpgp/packet/reader.go | 76 +
.../x/crypto/openpgp/packet/signature.go | 731 +
.../x/crypto/openpgp/packet/signature_v3.go | 146 +
.../openpgp/packet/symmetric_key_encrypted.go | 155 +
.../openpgp/packet/symmetrically_encrypted.go | 290 +
.../x/crypto/openpgp/packet/userattribute.go | 91 +
.../x/crypto/openpgp/packet/userid.go | 160 +
vendor/golang.org/x/crypto/openpgp/read.go | 448 +
vendor/golang.org/x/crypto/openpgp/s2k/s2k.go | 279 +
vendor/golang.org/x/crypto/openpgp/write.go | 418 +
.../x/crypto/salsa20/salsa/hsalsa20.go | 144 +
.../x/crypto/salsa20/salsa/salsa208.go | 199 +
.../x/crypto/salsa20/salsa/salsa20_amd64.go | 24 +
.../x/crypto/salsa20/salsa/salsa20_amd64.s | 881 +
.../x/crypto/salsa20/salsa/salsa20_noasm.go | 15 +
.../x/crypto/salsa20/salsa/salsa20_ref.go | 231 +
vendor/golang.org/x/crypto/ssh/buffer.go | 97 +
vendor/golang.org/x/crypto/ssh/certs.go | 587 +
vendor/golang.org/x/crypto/ssh/channel.go | 633 +
vendor/golang.org/x/crypto/ssh/cipher.go | 789 +
vendor/golang.org/x/crypto/ssh/client.go | 282 +
vendor/golang.org/x/crypto/ssh/client_auth.go | 725 +
vendor/golang.org/x/crypto/ssh/common.go | 430 +
vendor/golang.org/x/crypto/ssh/connection.go | 143 +
vendor/golang.org/x/crypto/ssh/doc.go | 21 +
vendor/golang.org/x/crypto/ssh/handshake.go | 704 +
.../ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go | 93 +
vendor/golang.org/x/crypto/ssh/kex.go | 774 +
vendor/golang.org/x/crypto/ssh/keys.go | 1447 +
vendor/golang.org/x/crypto/ssh/mac.go | 61 +
vendor/golang.org/x/crypto/ssh/messages.go | 877 +
vendor/golang.org/x/crypto/ssh/mux.go | 351 +
vendor/golang.org/x/crypto/ssh/server.go | 752 +
vendor/golang.org/x/crypto/ssh/session.go | 648 +
vendor/golang.org/x/crypto/ssh/ssh_gss.go | 139 +
vendor/golang.org/x/crypto/ssh/streamlocal.go | 116 +
vendor/golang.org/x/crypto/ssh/tcpip.go | 474 +
.../x/crypto/ssh/terminal/terminal.go | 76 +
vendor/golang.org/x/crypto/ssh/transport.go | 357 +
vendor/golang.org/x/net/AUTHORS | 3 +
vendor/golang.org/x/net/CONTRIBUTORS | 3 +
vendor/golang.org/x/net/LICENSE | 27 +
vendor/golang.org/x/net/PATENTS | 22 +
vendor/golang.org/x/net/context/context.go | 56 +
.../x/net/context/ctxhttp/ctxhttp.go | 71 +
vendor/golang.org/x/net/context/go17.go | 73 +
vendor/golang.org/x/net/context/go19.go | 21 +
vendor/golang.org/x/net/context/pre_go17.go | 301 +
vendor/golang.org/x/net/context/pre_go19.go | 110 +
vendor/golang.org/x/net/html/atom/atom.go | 78 +
vendor/golang.org/x/net/html/atom/table.go | 783 +
vendor/golang.org/x/net/html/const.go | 111 +
vendor/golang.org/x/net/html/doc.go | 106 +
vendor/golang.org/x/net/html/doctype.go | 156 +
vendor/golang.org/x/net/html/entity.go | 2253 +
vendor/golang.org/x/net/html/escape.go | 258 +
vendor/golang.org/x/net/html/foreign.go | 222 +
vendor/golang.org/x/net/html/node.go | 225 +
vendor/golang.org/x/net/html/parse.go | 2460 +
vendor/golang.org/x/net/html/render.go | 273 +
vendor/golang.org/x/net/html/token.go | 1224 +
vendor/golang.org/x/net/publicsuffix/list.go | 181 +
vendor/golang.org/x/net/publicsuffix/table.go | 10583 +
vendor/golang.org/x/oauth2/.travis.yml | 13 +
vendor/golang.org/x/oauth2/AUTHORS | 3 +
vendor/golang.org/x/oauth2/CONTRIBUTING.md | 26 +
vendor/golang.org/x/oauth2/CONTRIBUTORS | 3 +
vendor/golang.org/x/oauth2/LICENSE | 27 +
vendor/golang.org/x/oauth2/README.md | 36 +
.../x/oauth2/internal/client_appengine.go | 14 +
vendor/golang.org/x/oauth2/internal/doc.go | 6 +
vendor/golang.org/x/oauth2/internal/oauth2.go | 37 +
vendor/golang.org/x/oauth2/internal/token.go | 294 +
.../golang.org/x/oauth2/internal/transport.go | 33 +
vendor/golang.org/x/oauth2/oauth2.go | 381 +
vendor/golang.org/x/oauth2/token.go | 178 +
vendor/golang.org/x/oauth2/transport.go | 89 +
vendor/golang.org/x/sys/AUTHORS | 3 +
vendor/golang.org/x/sys/CONTRIBUTORS | 3 +
vendor/golang.org/x/sys/LICENSE | 27 +
vendor/golang.org/x/sys/PATENTS | 22 +
vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s | 18 +
vendor/golang.org/x/sys/cpu/byteorder.go | 65 +
vendor/golang.org/x/sys/cpu/cpu.go | 287 +
vendor/golang.org/x/sys/cpu/cpu_aix.go | 34 +
vendor/golang.org/x/sys/cpu/cpu_arm.go | 73 +
vendor/golang.org/x/sys/cpu/cpu_arm64.go | 172 +
vendor/golang.org/x/sys/cpu/cpu_arm64.s | 32 +
vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go | 12 +
vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go | 22 +
vendor/golang.org/x/sys/cpu/cpu_gc_x86.go | 17 +
.../golang.org/x/sys/cpu/cpu_gccgo_arm64.go | 12 +
.../golang.org/x/sys/cpu/cpu_gccgo_s390x.go | 23 +
vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c | 43 +
vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go | 33 +
vendor/golang.org/x/sys/cpu/cpu_linux.go | 16 +
vendor/golang.org/x/sys/cpu/cpu_linux_arm.go | 39 +
.../golang.org/x/sys/cpu/cpu_linux_arm64.go | 71 +
.../golang.org/x/sys/cpu/cpu_linux_mips64x.go | 24 +
.../golang.org/x/sys/cpu/cpu_linux_noinit.go | 10 +
.../golang.org/x/sys/cpu/cpu_linux_ppc64x.go | 32 +
.../golang.org/x/sys/cpu/cpu_linux_s390x.go | 40 +
vendor/golang.org/x/sys/cpu/cpu_mips64x.go | 16 +
vendor/golang.org/x/sys/cpu/cpu_mipsx.go | 12 +
.../golang.org/x/sys/cpu/cpu_netbsd_arm64.go | 173 +
vendor/golang.org/x/sys/cpu/cpu_other_arm.go | 10 +
.../golang.org/x/sys/cpu/cpu_other_arm64.go | 10 +
.../golang.org/x/sys/cpu/cpu_other_mips64x.go | 13 +
vendor/golang.org/x/sys/cpu/cpu_ppc64x.go | 17 +
vendor/golang.org/x/sys/cpu/cpu_riscv64.go | 12 +
vendor/golang.org/x/sys/cpu/cpu_s390x.go | 172 +
vendor/golang.org/x/sys/cpu/cpu_s390x.s | 58 +
vendor/golang.org/x/sys/cpu/cpu_wasm.go | 18 +
vendor/golang.org/x/sys/cpu/cpu_x86.go | 145 +
vendor/golang.org/x/sys/cpu/cpu_x86.s | 28 +
vendor/golang.org/x/sys/cpu/cpu_zos.go | 10 +
vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go | 25 +
vendor/golang.org/x/sys/cpu/hwcap_linux.go | 56 +
.../golang.org/x/sys/cpu/syscall_aix_gccgo.go | 27 +
.../x/sys/cpu/syscall_aix_ppc64_gc.go | 36 +
.../sys/internal/unsafeheader/unsafeheader.go | 30 +
vendor/golang.org/x/sys/plan9/asm.s | 8 +
vendor/golang.org/x/sys/plan9/asm_plan9_386.s | 30 +
.../golang.org/x/sys/plan9/asm_plan9_amd64.s | 30 +
vendor/golang.org/x/sys/plan9/asm_plan9_arm.s | 25 +
vendor/golang.org/x/sys/plan9/const_plan9.go | 70 +
vendor/golang.org/x/sys/plan9/dir_plan9.go | 212 +
vendor/golang.org/x/sys/plan9/env_plan9.go | 31 +
vendor/golang.org/x/sys/plan9/errors_plan9.go | 50 +
vendor/golang.org/x/sys/plan9/mkall.sh | 150 +
vendor/golang.org/x/sys/plan9/mkerrors.sh | 246 +
.../golang.org/x/sys/plan9/mksysnum_plan9.sh | 23 +
.../golang.org/x/sys/plan9/pwd_go15_plan9.go | 22 +
vendor/golang.org/x/sys/plan9/pwd_plan9.go | 24 +
vendor/golang.org/x/sys/plan9/race.go | 31 +
vendor/golang.org/x/sys/plan9/race0.go | 26 +
vendor/golang.org/x/sys/plan9/str.go | 23 +
vendor/golang.org/x/sys/plan9/syscall.go | 117 +
.../golang.org/x/sys/plan9/syscall_plan9.go | 351 +
.../x/sys/plan9/zsyscall_plan9_386.go | 285 +
.../x/sys/plan9/zsyscall_plan9_amd64.go | 285 +
.../x/sys/plan9/zsyscall_plan9_arm.go | 285 +
.../golang.org/x/sys/plan9/zsysnum_plan9.go | 49 +
vendor/golang.org/x/sys/unix/.gitignore | 2 +
vendor/golang.org/x/sys/unix/README.md | 184 +
.../golang.org/x/sys/unix/affinity_linux.go | 86 +
vendor/golang.org/x/sys/unix/aliases.go | 15 +
vendor/golang.org/x/sys/unix/asm_aix_ppc64.s | 18 +
vendor/golang.org/x/sys/unix/asm_bsd_386.s | 29 +
vendor/golang.org/x/sys/unix/asm_bsd_amd64.s | 29 +
vendor/golang.org/x/sys/unix/asm_bsd_arm.s | 29 +
vendor/golang.org/x/sys/unix/asm_bsd_arm64.s | 29 +
vendor/golang.org/x/sys/unix/asm_linux_386.s | 66 +
.../golang.org/x/sys/unix/asm_linux_amd64.s | 58 +
vendor/golang.org/x/sys/unix/asm_linux_arm.s | 57 +
.../golang.org/x/sys/unix/asm_linux_arm64.s | 53 +
.../golang.org/x/sys/unix/asm_linux_mips64x.s | 57 +
.../golang.org/x/sys/unix/asm_linux_mipsx.s | 55 +
.../golang.org/x/sys/unix/asm_linux_ppc64x.s | 45 +
.../golang.org/x/sys/unix/asm_linux_riscv64.s | 49 +
.../golang.org/x/sys/unix/asm_linux_s390x.s | 57 +
.../x/sys/unix/asm_openbsd_mips64.s | 30 +
.../golang.org/x/sys/unix/asm_solaris_amd64.s | 18 +
vendor/golang.org/x/sys/unix/asm_zos_s390x.s | 426 +
.../golang.org/x/sys/unix/bluetooth_linux.go | 36 +
vendor/golang.org/x/sys/unix/cap_freebsd.go | 196 +
vendor/golang.org/x/sys/unix/constants.go | 14 +
vendor/golang.org/x/sys/unix/dev_aix_ppc.go | 27 +
vendor/golang.org/x/sys/unix/dev_aix_ppc64.go | 29 +
vendor/golang.org/x/sys/unix/dev_darwin.go | 24 +
vendor/golang.org/x/sys/unix/dev_dragonfly.go | 30 +
vendor/golang.org/x/sys/unix/dev_freebsd.go | 30 +
vendor/golang.org/x/sys/unix/dev_linux.go | 42 +
vendor/golang.org/x/sys/unix/dev_netbsd.go | 29 +
vendor/golang.org/x/sys/unix/dev_openbsd.go | 29 +
vendor/golang.org/x/sys/unix/dev_zos.go | 29 +
vendor/golang.org/x/sys/unix/dirent.go | 103 +
vendor/golang.org/x/sys/unix/endian_big.go | 10 +
vendor/golang.org/x/sys/unix/endian_little.go | 10 +
vendor/golang.org/x/sys/unix/env_unix.go | 32 +
vendor/golang.org/x/sys/unix/epoll_zos.go | 221 +
.../x/sys/unix/errors_freebsd_386.go | 233 +
.../x/sys/unix/errors_freebsd_amd64.go | 233 +
.../x/sys/unix/errors_freebsd_arm.go | 226 +
.../x/sys/unix/errors_freebsd_arm64.go | 17 +
vendor/golang.org/x/sys/unix/fcntl.go | 37 +
vendor/golang.org/x/sys/unix/fcntl_darwin.go | 24 +
.../x/sys/unix/fcntl_linux_32bit.go | 14 +
vendor/golang.org/x/sys/unix/fdset.go | 30 +
vendor/golang.org/x/sys/unix/fstatfs_zos.go | 164 +
vendor/golang.org/x/sys/unix/gccgo.go | 60 +
vendor/golang.org/x/sys/unix/gccgo_c.c | 45 +
.../x/sys/unix/gccgo_linux_amd64.go | 21 +
vendor/golang.org/x/sys/unix/ifreq_linux.go | 149 +
vendor/golang.org/x/sys/unix/ioctl.go | 75 +
vendor/golang.org/x/sys/unix/ioctl_linux.go | 219 +
vendor/golang.org/x/sys/unix/ioctl_zos.go | 74 +
vendor/golang.org/x/sys/unix/mkall.sh | 231 +
vendor/golang.org/x/sys/unix/mkerrors.sh | 770 +
vendor/golang.org/x/sys/unix/pagesize_unix.go | 16 +
.../golang.org/x/sys/unix/pledge_openbsd.go | 163 +
vendor/golang.org/x/sys/unix/ptrace_darwin.go | 12 +
vendor/golang.org/x/sys/unix/ptrace_ios.go | 12 +
vendor/golang.org/x/sys/unix/race.go | 31 +
vendor/golang.org/x/sys/unix/race0.go | 26 +
.../x/sys/unix/readdirent_getdents.go | 13 +
.../x/sys/unix/readdirent_getdirentries.go | 20 +
.../x/sys/unix/sockcmsg_dragonfly.go | 16 +
.../golang.org/x/sys/unix/sockcmsg_linux.go | 85 +
vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 93 +
.../x/sys/unix/sockcmsg_unix_other.go | 47 +
vendor/golang.org/x/sys/unix/str.go | 27 +
vendor/golang.org/x/sys/unix/syscall.go | 95 +
vendor/golang.org/x/sys/unix/syscall_aix.go | 545 +
.../golang.org/x/sys/unix/syscall_aix_ppc.go | 54 +
.../x/sys/unix/syscall_aix_ppc64.go | 85 +
vendor/golang.org/x/sys/unix/syscall_bsd.go | 625 +
.../x/sys/unix/syscall_darwin.1_12.go | 32 +
.../x/sys/unix/syscall_darwin.1_13.go | 108 +
.../golang.org/x/sys/unix/syscall_darwin.go | 733 +
.../x/sys/unix/syscall_darwin_amd64.go | 51 +
.../x/sys/unix/syscall_darwin_arm64.go | 51 +
.../x/sys/unix/syscall_darwin_libSystem.go | 27 +
.../x/sys/unix/syscall_dragonfly.go | 542 +
.../x/sys/unix/syscall_dragonfly_amd64.go | 57 +
.../golang.org/x/sys/unix/syscall_freebsd.go | 869 +
.../x/sys/unix/syscall_freebsd_386.go | 67 +
.../x/sys/unix/syscall_freebsd_amd64.go | 67 +
.../x/sys/unix/syscall_freebsd_arm.go | 63 +
.../x/sys/unix/syscall_freebsd_arm64.go | 63 +
.../golang.org/x/sys/unix/syscall_illumos.go | 186 +
vendor/golang.org/x/sys/unix/syscall_linux.go | 2451 +
.../x/sys/unix/syscall_linux_386.go | 346 +
.../x/sys/unix/syscall_linux_alarm.go | 14 +
.../x/sys/unix/syscall_linux_amd64.go | 150 +
.../x/sys/unix/syscall_linux_amd64_gc.go | 13 +
.../x/sys/unix/syscall_linux_arm.go | 248 +
.../x/sys/unix/syscall_linux_arm64.go | 198 +
.../golang.org/x/sys/unix/syscall_linux_gc.go | 15 +
.../x/sys/unix/syscall_linux_gc_386.go | 17 +
.../x/sys/unix/syscall_linux_gc_arm.go | 14 +
.../x/sys/unix/syscall_linux_gccgo_386.go | 31 +
.../x/sys/unix/syscall_linux_gccgo_arm.go | 21 +
.../x/sys/unix/syscall_linux_mips64x.go | 195 +
.../x/sys/unix/syscall_linux_mipsx.go | 207 +
.../x/sys/unix/syscall_linux_ppc.go | 236 +
.../x/sys/unix/syscall_linux_ppc64x.go | 122 +
.../x/sys/unix/syscall_linux_riscv64.go | 183 +
.../x/sys/unix/syscall_linux_s390x.go | 302 +
.../x/sys/unix/syscall_linux_sparc64.go | 118 +
.../golang.org/x/sys/unix/syscall_netbsd.go | 609 +
.../x/sys/unix/syscall_netbsd_386.go | 38 +
.../x/sys/unix/syscall_netbsd_amd64.go | 38 +
.../x/sys/unix/syscall_netbsd_arm.go | 38 +
.../x/sys/unix/syscall_netbsd_arm64.go | 38 +
.../golang.org/x/sys/unix/syscall_openbsd.go | 387 +
.../x/sys/unix/syscall_openbsd_386.go | 42 +
.../x/sys/unix/syscall_openbsd_amd64.go | 42 +
.../x/sys/unix/syscall_openbsd_arm.go | 42 +
.../x/sys/unix/syscall_openbsd_arm64.go | 42 +
.../x/sys/unix/syscall_openbsd_mips64.go | 35 +
.../golang.org/x/sys/unix/syscall_solaris.go | 1004 +
.../x/sys/unix/syscall_solaris_amd64.go | 28 +
vendor/golang.org/x/sys/unix/syscall_unix.go | 486 +
.../golang.org/x/sys/unix/syscall_unix_gc.go | 18 +
.../x/sys/unix/syscall_unix_gc_ppc64x.go | 25 +
.../x/sys/unix/syscall_zos_s390x.go | 1823 +
vendor/golang.org/x/sys/unix/sysvshm_linux.go | 21 +
vendor/golang.org/x/sys/unix/sysvshm_unix.go | 61 +
.../x/sys/unix/sysvshm_unix_other.go | 14 +
vendor/golang.org/x/sys/unix/timestruct.go | 77 +
.../golang.org/x/sys/unix/unveil_openbsd.go | 42 +
vendor/golang.org/x/sys/unix/xattr_bsd.go | 241 +
.../golang.org/x/sys/unix/zerrors_aix_ppc.go | 1385 +
.../x/sys/unix/zerrors_aix_ppc64.go | 1386 +
.../x/sys/unix/zerrors_darwin_amd64.go | 1892 +
.../x/sys/unix/zerrors_darwin_arm64.go | 1892 +
.../x/sys/unix/zerrors_dragonfly_amd64.go | 1738 +
.../x/sys/unix/zerrors_freebsd_386.go | 1948 +
.../x/sys/unix/zerrors_freebsd_amd64.go | 1947 +
.../x/sys/unix/zerrors_freebsd_arm.go | 1846 +
.../x/sys/unix/zerrors_freebsd_arm64.go | 1948 +
vendor/golang.org/x/sys/unix/zerrors_linux.go | 3060 +
.../x/sys/unix/zerrors_linux_386.go | 826 +
.../x/sys/unix/zerrors_linux_amd64.go | 826 +
.../x/sys/unix/zerrors_linux_arm.go | 832 +
.../x/sys/unix/zerrors_linux_arm64.go | 823 +
.../x/sys/unix/zerrors_linux_mips.go | 833 +
.../x/sys/unix/zerrors_linux_mips64.go | 833 +
.../x/sys/unix/zerrors_linux_mips64le.go | 833 +
.../x/sys/unix/zerrors_linux_mipsle.go | 833 +
.../x/sys/unix/zerrors_linux_ppc.go | 885 +
.../x/sys/unix/zerrors_linux_ppc64.go | 889 +
.../x/sys/unix/zerrors_linux_ppc64le.go | 889 +
.../x/sys/unix/zerrors_linux_riscv64.go | 813 +
.../x/sys/unix/zerrors_linux_s390x.go | 888 +
.../x/sys/unix/zerrors_linux_sparc64.go | 883 +
.../x/sys/unix/zerrors_netbsd_386.go | 1780 +
.../x/sys/unix/zerrors_netbsd_amd64.go | 1770 +
.../x/sys/unix/zerrors_netbsd_arm.go | 1759 +
.../x/sys/unix/zerrors_netbsd_arm64.go | 1770 +
.../x/sys/unix/zerrors_openbsd_386.go | 1668 +
.../x/sys/unix/zerrors_openbsd_amd64.go | 1775 +
.../x/sys/unix/zerrors_openbsd_arm.go | 1670 +
.../x/sys/unix/zerrors_openbsd_arm64.go | 1798 +
.../x/sys/unix/zerrors_openbsd_mips64.go | 1863 +
.../x/sys/unix/zerrors_solaris_amd64.go | 1557 +
.../x/sys/unix/zerrors_zos_s390x.go | 860 +
.../x/sys/unix/zptrace_armnn_linux.go | 42 +
.../x/sys/unix/zptrace_linux_arm64.go | 17 +
.../x/sys/unix/zptrace_mipsnn_linux.go | 51 +
.../x/sys/unix/zptrace_mipsnnle_linux.go | 51 +
.../x/sys/unix/zptrace_x86_linux.go | 81 +
.../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 1485 +
.../x/sys/unix/zsyscall_aix_ppc64.go | 1443 +
.../x/sys/unix/zsyscall_aix_ppc64_gc.go | 1192 +
.../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 1070 +
.../x/sys/unix/zsyscall_darwin_amd64.1_13.go | 40 +
.../x/sys/unix/zsyscall_darwin_amd64.1_13.s | 25 +
.../x/sys/unix/zsyscall_darwin_amd64.go | 2495 +
.../x/sys/unix/zsyscall_darwin_amd64.s | 883 +
.../x/sys/unix/zsyscall_darwin_arm64.1_13.go | 40 +
.../x/sys/unix/zsyscall_darwin_arm64.1_13.s | 25 +
.../x/sys/unix/zsyscall_darwin_arm64.go | 2495 +
.../x/sys/unix/zsyscall_darwin_arm64.s | 883 +
.../x/sys/unix/zsyscall_dragonfly_amd64.go | 1679 +
.../x/sys/unix/zsyscall_freebsd_386.go | 2016 +
.../x/sys/unix/zsyscall_freebsd_amd64.go | 2016 +
.../x/sys/unix/zsyscall_freebsd_arm.go | 2016 +
.../x/sys/unix/zsyscall_freebsd_arm64.go | 2016 +
.../x/sys/unix/zsyscall_illumos_amd64.go | 128 +
.../golang.org/x/sys/unix/zsyscall_linux.go | 2074 +
.../x/sys/unix/zsyscall_linux_386.go | 537 +
.../x/sys/unix/zsyscall_linux_amd64.go | 693 +
.../x/sys/unix/zsyscall_linux_arm.go | 652 +
.../x/sys/unix/zsyscall_linux_arm64.go | 592 +
.../x/sys/unix/zsyscall_linux_mips.go | 704 +
.../x/sys/unix/zsyscall_linux_mips64.go | 698 +
.../x/sys/unix/zsyscall_linux_mips64le.go | 687 +
.../x/sys/unix/zsyscall_linux_mipsle.go | 704 +
.../x/sys/unix/zsyscall_linux_ppc.go | 709 +
.../x/sys/unix/zsyscall_linux_ppc64.go | 755 +
.../x/sys/unix/zsyscall_linux_ppc64le.go | 755 +
.../x/sys/unix/zsyscall_linux_riscv64.go | 572 +
.../x/sys/unix/zsyscall_linux_s390x.go | 546 +
.../x/sys/unix/zsyscall_linux_sparc64.go | 699 +
.../x/sys/unix/zsyscall_netbsd_386.go | 1850 +
.../x/sys/unix/zsyscall_netbsd_amd64.go | 1850 +
.../x/sys/unix/zsyscall_netbsd_arm.go | 1850 +
.../x/sys/unix/zsyscall_netbsd_arm64.go | 1850 +
.../x/sys/unix/zsyscall_openbsd_386.go | 1693 +
.../x/sys/unix/zsyscall_openbsd_amd64.go | 1693 +
.../x/sys/unix/zsyscall_openbsd_arm.go | 1693 +
.../x/sys/unix/zsyscall_openbsd_arm64.go | 1693 +
.../x/sys/unix/zsyscall_openbsd_mips64.go | 1693 +
.../x/sys/unix/zsyscall_solaris_amd64.go | 2053 +
.../x/sys/unix/zsyscall_zos_s390x.go | 1255 +
.../x/sys/unix/zsysctl_openbsd_386.go | 274 +
.../x/sys/unix/zsysctl_openbsd_amd64.go | 272 +
.../x/sys/unix/zsysctl_openbsd_arm.go | 274 +
.../x/sys/unix/zsysctl_openbsd_arm64.go | 276 +
.../x/sys/unix/zsysctl_openbsd_mips64.go | 280 +
.../x/sys/unix/zsysnum_darwin_amd64.go | 440 +
.../x/sys/unix/zsysnum_darwin_arm64.go | 438 +
.../x/sys/unix/zsysnum_dragonfly_amd64.go | 317 +
.../x/sys/unix/zsysnum_freebsd_386.go | 397 +
.../x/sys/unix/zsysnum_freebsd_amd64.go | 397 +
.../x/sys/unix/zsysnum_freebsd_arm.go | 397 +
.../x/sys/unix/zsysnum_freebsd_arm64.go | 397 +
.../x/sys/unix/zsysnum_linux_386.go | 449 +
.../x/sys/unix/zsysnum_linux_amd64.go | 371 +
.../x/sys/unix/zsysnum_linux_arm.go | 413 +
.../x/sys/unix/zsysnum_linux_arm64.go | 316 +
.../x/sys/unix/zsysnum_linux_mips.go | 433 +
.../x/sys/unix/zsysnum_linux_mips64.go | 363 +
.../x/sys/unix/zsysnum_linux_mips64le.go | 363 +
.../x/sys/unix/zsysnum_linux_mipsle.go | 433 +
.../x/sys/unix/zsysnum_linux_ppc.go | 440 +
.../x/sys/unix/zsysnum_linux_ppc64.go | 412 +
.../x/sys/unix/zsysnum_linux_ppc64le.go | 412 +
.../x/sys/unix/zsysnum_linux_riscv64.go | 314 +
.../x/sys/unix/zsysnum_linux_s390x.go | 377 +
.../x/sys/unix/zsysnum_linux_sparc64.go | 391 +
.../x/sys/unix/zsysnum_netbsd_386.go | 275 +
.../x/sys/unix/zsysnum_netbsd_amd64.go | 275 +
.../x/sys/unix/zsysnum_netbsd_arm.go | 275 +
.../x/sys/unix/zsysnum_netbsd_arm64.go | 275 +
.../x/sys/unix/zsysnum_openbsd_386.go | 219 +
.../x/sys/unix/zsysnum_openbsd_amd64.go | 219 +
.../x/sys/unix/zsysnum_openbsd_arm.go | 219 +
.../x/sys/unix/zsysnum_openbsd_arm64.go | 218 +
.../x/sys/unix/zsysnum_openbsd_mips64.go | 221 +
.../x/sys/unix/zsysnum_zos_s390x.go | 2670 +
.../golang.org/x/sys/unix/ztypes_aix_ppc.go | 354 +
.../golang.org/x/sys/unix/ztypes_aix_ppc64.go | 358 +
.../x/sys/unix/ztypes_darwin_amd64.go | 768 +
.../x/sys/unix/ztypes_darwin_arm64.go | 768 +
.../x/sys/unix/ztypes_dragonfly_amd64.go | 474 +
.../x/sys/unix/ztypes_freebsd_386.go | 723 +
.../x/sys/unix/ztypes_freebsd_amd64.go | 726 +
.../x/sys/unix/ztypes_freebsd_arm.go | 707 +
.../x/sys/unix/ztypes_freebsd_arm64.go | 704 +
.../x/sys/unix/ztypes_illumos_amd64.go | 42 +
vendor/golang.org/x/sys/unix/ztypes_linux.go | 5534 +
.../golang.org/x/sys/unix/ztypes_linux_386.go | 677 +
.../x/sys/unix/ztypes_linux_amd64.go | 693 +
.../golang.org/x/sys/unix/ztypes_linux_arm.go | 672 +
.../x/sys/unix/ztypes_linux_arm64.go | 672 +
.../x/sys/unix/ztypes_linux_mips.go | 677 +
.../x/sys/unix/ztypes_linux_mips64.go | 675 +
.../x/sys/unix/ztypes_linux_mips64le.go | 675 +
.../x/sys/unix/ztypes_linux_mipsle.go | 677 +
.../golang.org/x/sys/unix/ztypes_linux_ppc.go | 685 +
.../x/sys/unix/ztypes_linux_ppc64.go | 681 +
.../x/sys/unix/ztypes_linux_ppc64le.go | 681 +
.../x/sys/unix/ztypes_linux_riscv64.go | 700 +
.../x/sys/unix/ztypes_linux_s390x.go | 695 +
.../x/sys/unix/ztypes_linux_sparc64.go | 676 +
.../x/sys/unix/ztypes_netbsd_386.go | 502 +
.../x/sys/unix/ztypes_netbsd_amd64.go | 510 +
.../x/sys/unix/ztypes_netbsd_arm.go | 507 +
.../x/sys/unix/ztypes_netbsd_arm64.go | 510 +
.../x/sys/unix/ztypes_openbsd_386.go | 574 +
.../x/sys/unix/ztypes_openbsd_amd64.go | 574 +
.../x/sys/unix/ztypes_openbsd_arm.go | 575 +
.../x/sys/unix/ztypes_openbsd_arm64.go | 568 +
.../x/sys/unix/ztypes_openbsd_mips64.go | 568 +
.../x/sys/unix/ztypes_solaris_amd64.go | 482 +
.../golang.org/x/sys/unix/ztypes_zos_s390x.go | 406 +
vendor/golang.org/x/sys/windows/aliases.go | 13 +
.../golang.org/x/sys/windows/dll_windows.go | 416 +
vendor/golang.org/x/sys/windows/empty.s | 9 +
.../golang.org/x/sys/windows/env_windows.go | 54 +
vendor/golang.org/x/sys/windows/eventlog.go | 21 +
.../golang.org/x/sys/windows/exec_windows.go | 178 +
.../x/sys/windows/memory_windows.go | 48 +
vendor/golang.org/x/sys/windows/mkerrors.bash | 70 +
.../x/sys/windows/mkknownfolderids.bash | 27 +
vendor/golang.org/x/sys/windows/mksyscall.go | 10 +
vendor/golang.org/x/sys/windows/race.go | 31 +
vendor/golang.org/x/sys/windows/race0.go | 26 +
.../x/sys/windows/security_windows.go | 1444 +
vendor/golang.org/x/sys/windows/service.go | 247 +
.../x/sys/windows/setupapi_windows.go | 1425 +
vendor/golang.org/x/sys/windows/str.go | 23 +
vendor/golang.org/x/sys/windows/syscall.go | 113 +
.../x/sys/windows/syscall_windows.go | 1701 +
.../golang.org/x/sys/windows/types_windows.go | 3176 +
.../x/sys/windows/types_windows_386.go | 35 +
.../x/sys/windows/types_windows_amd64.go | 34 +
.../x/sys/windows/types_windows_arm.go | 35 +
.../x/sys/windows/types_windows_arm64.go | 34 +
.../x/sys/windows/zerrors_windows.go | 9468 +
.../x/sys/windows/zknownfolderids_windows.go | 149 +
.../x/sys/windows/zsyscall_windows.go | 4196 +
vendor/golang.org/x/term/AUTHORS | 3 +
vendor/golang.org/x/term/CONTRIBUTING.md | 26 +
vendor/golang.org/x/term/CONTRIBUTORS | 3 +
vendor/golang.org/x/term/LICENSE | 27 +
vendor/golang.org/x/term/PATENTS | 22 +
vendor/golang.org/x/term/README.md | 19 +
vendor/golang.org/x/term/codereview.cfg | 1 +
vendor/golang.org/x/term/term.go | 60 +
vendor/golang.org/x/term/term_plan9.go | 42 +
vendor/golang.org/x/term/term_unix.go | 92 +
vendor/golang.org/x/term/term_unix_bsd.go | 13 +
vendor/golang.org/x/term/term_unix_other.go | 13 +
vendor/golang.org/x/term/term_unsupported.go | 39 +
vendor/golang.org/x/term/term_windows.go | 79 +
vendor/golang.org/x/term/terminal.go | 987 +
vendor/google.golang.org/appengine/LICENSE | 202 +
.../appengine/internal/api.go | 678 +
.../appengine/internal/api_classic.go | 169 +
.../appengine/internal/api_common.go | 123 +
.../appengine/internal/app_id.go | 28 +
.../appengine/internal/base/api_base.pb.go | 308 +
.../appengine/internal/base/api_base.proto | 33 +
.../internal/datastore/datastore_v3.pb.go | 4367 +
.../internal/datastore/datastore_v3.proto | 551 +
.../appengine/internal/identity.go | 55 +
.../appengine/internal/identity_classic.go | 61 +
.../appengine/internal/identity_flex.go | 11 +
.../appengine/internal/identity_vm.go | 134 +
.../appengine/internal/internal.go | 110 +
.../appengine/internal/log/log_service.pb.go | 1313 +
.../appengine/internal/log/log_service.proto | 150 +
.../appengine/internal/main.go | 16 +
.../appengine/internal/main_common.go | 7 +
.../appengine/internal/main_vm.go | 69 +
.../appengine/internal/metadata.go | 60 +
.../appengine/internal/net.go | 56 +
.../appengine/internal/regen.sh | 40 +
.../internal/remote_api/remote_api.pb.go | 361 +
.../internal/remote_api/remote_api.proto | 44 +
.../appengine/internal/transaction.go | 115 +
.../internal/urlfetch/urlfetch_service.pb.go | 527 +
.../internal/urlfetch/urlfetch_service.proto | 64 +
.../appengine/urlfetch/urlfetch.go | 210 +
vendor/google.golang.org/protobuf/AUTHORS | 3 +
.../google.golang.org/protobuf/CONTRIBUTORS | 3 +
vendor/google.golang.org/protobuf/LICENSE | 27 +
vendor/google.golang.org/protobuf/PATENTS | 22 +
.../protobuf/encoding/prototext/decode.go | 770 +
.../protobuf/encoding/prototext/doc.go | 7 +
.../protobuf/encoding/prototext/encode.go | 371 +
.../protobuf/encoding/protowire/wire.go | 547 +
.../protobuf/internal/descfmt/stringer.go | 318 +
.../protobuf/internal/descopts/options.go | 29 +
.../protobuf/internal/detrand/rand.go | 69 +
.../internal/encoding/defval/default.go | 213 +
.../encoding/messageset/messageset.go | 241 +
.../protobuf/internal/encoding/tag/tag.go | 207 +
.../protobuf/internal/encoding/text/decode.go | 665 +
.../internal/encoding/text/decode_number.go | 190 +
.../internal/encoding/text/decode_string.go | 161 +
.../internal/encoding/text/decode_token.go | 373 +
.../protobuf/internal/encoding/text/doc.go | 29 +
.../protobuf/internal/encoding/text/encode.go | 270 +
.../protobuf/internal/errors/errors.go | 89 +
.../protobuf/internal/errors/is_go112.go | 40 +
.../protobuf/internal/errors/is_go113.go | 13 +
.../protobuf/internal/filedesc/build.go | 158 +
.../protobuf/internal/filedesc/desc.go | 631 +
.../protobuf/internal/filedesc/desc_init.go | 471 +
.../protobuf/internal/filedesc/desc_lazy.go | 704 +
.../protobuf/internal/filedesc/desc_list.go | 450 +
.../internal/filedesc/desc_list_gen.go | 356 +
.../protobuf/internal/filedesc/placeholder.go | 107 +
.../protobuf/internal/filetype/build.go | 297 +
.../protobuf/internal/flags/flags.go | 24 +
.../internal/flags/proto_legacy_disable.go | 10 +
.../internal/flags/proto_legacy_enable.go | 10 +
.../protobuf/internal/genid/any_gen.go | 34 +
.../protobuf/internal/genid/api_gen.go | 106 +
.../protobuf/internal/genid/descriptor_gen.go | 829 +
.../protobuf/internal/genid/doc.go | 11 +
.../protobuf/internal/genid/duration_gen.go | 34 +
.../protobuf/internal/genid/empty_gen.go | 19 +
.../protobuf/internal/genid/field_mask_gen.go | 31 +
.../protobuf/internal/genid/goname.go | 25 +
.../protobuf/internal/genid/map_entry.go | 16 +
.../internal/genid/source_context_gen.go | 31 +
.../protobuf/internal/genid/struct_gen.go | 116 +
.../protobuf/internal/genid/timestamp_gen.go | 34 +
.../protobuf/internal/genid/type_gen.go | 184 +
.../protobuf/internal/genid/wrappers.go | 13 +
.../protobuf/internal/genid/wrappers_gen.go | 175 +
.../protobuf/internal/impl/api_export.go | 177 +
.../protobuf/internal/impl/checkinit.go | 141 +
.../protobuf/internal/impl/codec_extension.go | 223 +
.../protobuf/internal/impl/codec_field.go | 830 +
.../protobuf/internal/impl/codec_gen.go | 5637 +
.../protobuf/internal/impl/codec_map.go | 388 +
.../protobuf/internal/impl/codec_map_go111.go | 38 +
.../protobuf/internal/impl/codec_map_go112.go | 12 +
.../protobuf/internal/impl/codec_message.go | 217 +
.../internal/impl/codec_messageset.go | 123 +
.../protobuf/internal/impl/codec_reflect.go | 210 +
.../protobuf/internal/impl/codec_tables.go | 557 +
.../protobuf/internal/impl/codec_unsafe.go | 18 +
.../protobuf/internal/impl/convert.go | 496 +
.../protobuf/internal/impl/convert_list.go | 141 +
.../protobuf/internal/impl/convert_map.go | 121 +
.../protobuf/internal/impl/decode.go | 284 +
.../protobuf/internal/impl/encode.go | 201 +
.../protobuf/internal/impl/enum.go | 21 +
.../protobuf/internal/impl/extension.go | 156 +
.../protobuf/internal/impl/legacy_enum.go | 219 +
.../protobuf/internal/impl/legacy_export.go | 92 +
.../internal/impl/legacy_extension.go | 176 +
.../protobuf/internal/impl/legacy_file.go | 81 +
.../protobuf/internal/impl/legacy_message.go | 565 +
.../protobuf/internal/impl/merge.go | 176 +
.../protobuf/internal/impl/merge_gen.go | 209 +
.../protobuf/internal/impl/message.go | 276 +
.../protobuf/internal/impl/message_reflect.go | 465 +
.../internal/impl/message_reflect_field.go | 543 +
.../internal/impl/message_reflect_gen.go | 249 +
.../protobuf/internal/impl/pointer_reflect.go | 179 +
.../protobuf/internal/impl/pointer_unsafe.go | 175 +
.../protobuf/internal/impl/validate.go | 576 +
.../protobuf/internal/impl/weak.go | 74 +
.../protobuf/internal/order/order.go | 89 +
.../protobuf/internal/order/range.go | 115 +
.../protobuf/internal/pragma/pragma.go | 29 +
.../protobuf/internal/set/ints.go | 58 +
.../protobuf/internal/strs/strings.go | 196 +
.../protobuf/internal/strs/strings_pure.go | 28 +
.../protobuf/internal/strs/strings_unsafe.go | 95 +
.../protobuf/internal/version/version.go | 79 +
.../protobuf/proto/checkinit.go | 71 +
.../protobuf/proto/decode.go | 293 +
.../protobuf/proto/decode_gen.go | 603 +
.../google.golang.org/protobuf/proto/doc.go | 94 +
.../protobuf/proto/encode.go | 319 +
.../protobuf/proto/encode_gen.go | 97 +
.../google.golang.org/protobuf/proto/equal.go | 167 +
.../protobuf/proto/extension.go | 92 +
.../google.golang.org/protobuf/proto/merge.go | 139 +
.../protobuf/proto/messageset.go | 93 +
.../google.golang.org/protobuf/proto/proto.go | 43 +
.../protobuf/proto/proto_methods.go | 20 +
.../protobuf/proto/proto_reflect.go | 20 +
.../google.golang.org/protobuf/proto/reset.go | 43 +
.../google.golang.org/protobuf/proto/size.go | 97 +
.../protobuf/proto/size_gen.go | 55 +
.../protobuf/proto/wrappers.go | 29 +
.../protobuf/reflect/protodesc/desc.go | 276 +
.../protobuf/reflect/protodesc/desc_init.go | 248 +
.../reflect/protodesc/desc_resolve.go | 286 +
.../reflect/protodesc/desc_validate.go | 374 +
.../protobuf/reflect/protodesc/proto.go | 252 +
.../protobuf/reflect/protoreflect/methods.go | 78 +
.../protobuf/reflect/protoreflect/proto.go | 504 +
.../protobuf/reflect/protoreflect/source.go | 128 +
.../reflect/protoreflect/source_gen.go | 461 +
.../protobuf/reflect/protoreflect/type.go | 665 +
.../protobuf/reflect/protoreflect/value.go | 285 +
.../reflect/protoreflect/value_pure.go | 60 +
.../reflect/protoreflect/value_union.go | 436 +
.../reflect/protoreflect/value_unsafe.go | 99 +
.../reflect/protoregistry/registry.go | 880 +
.../protobuf/runtime/protoiface/legacy.go | 15 +
.../protobuf/runtime/protoiface/methods.go | 168 +
.../protobuf/runtime/protoimpl/impl.go | 44 +
.../protobuf/runtime/protoimpl/version.go | 56 +
.../types/descriptorpb/descriptor.pb.go | 3957 +
vendor/gopkg.in/errgo.v1/LICENSE | 26 +
vendor/gopkg.in/errgo.v1/README.md | 259 +
vendor/gopkg.in/errgo.v1/errors.go | 389 +
vendor/gopkg.in/httprequest.v1/.travis.yml | 5 +
vendor/gopkg.in/httprequest.v1/LICENSE | 185 +
vendor/gopkg.in/httprequest.v1/README.md | 690 +
vendor/gopkg.in/httprequest.v1/client.go | 308 +
vendor/gopkg.in/httprequest.v1/error.go | 121 +
vendor/gopkg.in/httprequest.v1/fancyerror.go | 277 +
vendor/gopkg.in/httprequest.v1/handler.go | 669 +
vendor/gopkg.in/httprequest.v1/marshal.go | 435 +
vendor/gopkg.in/httprequest.v1/type.go | 412 +
vendor/gopkg.in/httprequest.v1/unmarshal.go | 262 +
vendor/gopkg.in/macaroon-bakery.v2/LICENSE | 187 +
.../macaroon-bakery.v2/bakery/bakery.go | 97 +
.../macaroon-bakery.v2/bakery/checker.go | 503 +
.../bakery/checkers/checkers.go | 246 +
.../bakery/checkers/declared.go | 137 +
.../bakery/checkers/namespace.go | 214 +
.../bakery/checkers/time.go | 97 +
.../macaroon-bakery.v2/bakery/codec.go | 381 +
.../macaroon-bakery.v2/bakery/discharge.go | 282 +
.../macaroon-bakery.v2/bakery/dischargeall.go | 56 +
.../gopkg.in/macaroon-bakery.v2/bakery/doc.go | 88 +
.../macaroon-bakery.v2/bakery/error.go | 77 +
.../macaroon-bakery.v2/bakery/keys.go | 219 +
.../macaroon-bakery.v2/bakery/logger.go | 28 +
.../macaroon-bakery.v2/bakery/macaroon.go | 356 +
.../macaroon-bakery.v2/bakery/oven.go | 359 +
.../macaroon-bakery.v2/bakery/slice.go | 134 +
.../macaroon-bakery.v2/bakery/store.go | 63 +
.../macaroon-bakery.v2/bakery/version.go | 30 +
.../macaroon-bakery.v2/httpbakery/browser.go | 200 +
.../macaroon-bakery.v2/httpbakery/checkers.go | 157 +
.../macaroon-bakery.v2/httpbakery/client.go | 727 +
.../httpbakery/context_go17.go | 12 +
.../httpbakery/context_prego17.go | 12 +
.../httpbakery/discharge.go | 367 +
.../httpbakery/dischargeclient_generated.go | 35 +
.../macaroon-bakery.v2/httpbakery/error.go | 359 +
.../macaroon-bakery.v2/httpbakery/keyring.go | 113 +
.../macaroon-bakery.v2/httpbakery/oven.go | 88 +
.../macaroon-bakery.v2/httpbakery/request.go | 196 +
.../macaroon-bakery.v2/httpbakery/visitor.go | 68 +
.../internal/httputil/relativeurl.go | 64 +
vendor/gopkg.in/macaroon.v2/.gitignore | 1 +
vendor/gopkg.in/macaroon.v2/.travis.yml | 11 +
vendor/gopkg.in/macaroon.v2/LICENSE | 26 +
vendor/gopkg.in/macaroon.v2/README.md | 355 +
vendor/gopkg.in/macaroon.v2/TODO | 4 +
vendor/gopkg.in/macaroon.v2/crypto.go | 91 +
vendor/gopkg.in/macaroon.v2/dependencies.tsv | 5 +
vendor/gopkg.in/macaroon.v2/macaroon.go | 399 +
vendor/gopkg.in/macaroon.v2/marshal-v1.go | 190 +
vendor/gopkg.in/macaroon.v2/marshal-v2.go | 253 +
vendor/gopkg.in/macaroon.v2/marshal.go | 239 +
vendor/gopkg.in/macaroon.v2/packet-v1.go | 133 +
vendor/gopkg.in/macaroon.v2/packet-v2.go | 117 +
vendor/gopkg.in/macaroon.v2/trace.go | 102 +
.../natefinch/lumberjack.v2/.gitignore | 23 +
.../natefinch/lumberjack.v2/.travis.yml | 6 +
.../gopkg.in/natefinch/lumberjack.v2/LICENSE | 21 +
.../natefinch/lumberjack.v2/README.md | 179 +
.../gopkg.in/natefinch/lumberjack.v2/chown.go | 11 +
.../natefinch/lumberjack.v2/chown_linux.go | 19 +
.../natefinch/lumberjack.v2/lumberjack.go | 541 +
vendor/gopkg.in/yaml.v3/LICENSE | 50 +
vendor/gopkg.in/yaml.v3/NOTICE | 13 +
vendor/gopkg.in/yaml.v3/README.md | 150 +
vendor/gopkg.in/yaml.v3/apic.go | 747 +
vendor/gopkg.in/yaml.v3/decode.go | 1000 +
vendor/gopkg.in/yaml.v3/emitterc.go | 2020 +
vendor/gopkg.in/yaml.v3/encode.go | 577 +
vendor/gopkg.in/yaml.v3/parserc.go | 1258 +
vendor/gopkg.in/yaml.v3/readerc.go | 434 +
vendor/gopkg.in/yaml.v3/resolve.go | 326 +
vendor/gopkg.in/yaml.v3/scannerc.go | 3038 +
vendor/gopkg.in/yaml.v3/sorter.go | 134 +
vendor/gopkg.in/yaml.v3/writerc.go | 48 +
vendor/gopkg.in/yaml.v3/yaml.go | 698 +
vendor/gopkg.in/yaml.v3/yamlh.go | 807 +
vendor/gopkg.in/yaml.v3/yamlprivateh.go | 198 +
vendor/gorm.io/driver/mysql/License | 21 +
vendor/gorm.io/driver/mysql/README.md | 51 +
vendor/gorm.io/driver/mysql/migrator.go | 239 +
vendor/gorm.io/driver/mysql/mysql.go | 405 +
vendor/gorm.io/driver/sqlite/License | 21 +
vendor/gorm.io/driver/sqlite/README.md | 17 +
vendor/gorm.io/driver/sqlite/ddlmod.go | 220 +
vendor/gorm.io/driver/sqlite/errors.go | 7 +
vendor/gorm.io/driver/sqlite/migrator.go | 418 +
vendor/gorm.io/driver/sqlite/sqlite.go | 220 +
vendor/gorm.io/gorm/.gitignore | 6 +
vendor/gorm.io/gorm/.golangci.yml | 11 +
vendor/gorm.io/gorm/License | 21 +
vendor/gorm.io/gorm/README.md | 43 +
vendor/gorm.io/gorm/association.go | 522 +
vendor/gorm.io/gorm/callbacks.go | 331 +
vendor/gorm.io/gorm/callbacks/associations.go | 419 +
vendor/gorm.io/gorm/callbacks/callbacks.go | 83 +
vendor/gorm.io/gorm/callbacks/callmethod.go | 23 +
vendor/gorm.io/gorm/callbacks/create.go | 343 +
vendor/gorm.io/gorm/callbacks/delete.go | 185 +
vendor/gorm.io/gorm/callbacks/helper.go | 152 +
vendor/gorm.io/gorm/callbacks/interfaces.go | 39 +
vendor/gorm.io/gorm/callbacks/preload.go | 173 +
vendor/gorm.io/gorm/callbacks/query.go | 273 +
vendor/gorm.io/gorm/callbacks/raw.go | 17 +
vendor/gorm.io/gorm/callbacks/row.go | 23 +
vendor/gorm.io/gorm/callbacks/transaction.go | 32 +
vendor/gorm.io/gorm/callbacks/update.go | 291 +
vendor/gorm.io/gorm/chainable_api.go | 315 +
vendor/gorm.io/gorm/clause/clause.go | 88 +
vendor/gorm.io/gorm/clause/delete.go | 23 +
vendor/gorm.io/gorm/clause/expression.go | 381 +
vendor/gorm.io/gorm/clause/from.go | 37 +
vendor/gorm.io/gorm/clause/group_by.go | 48 +
vendor/gorm.io/gorm/clause/insert.go | 39 +
vendor/gorm.io/gorm/clause/joins.go | 47 +
vendor/gorm.io/gorm/clause/limit.go | 48 +
vendor/gorm.io/gorm/clause/locking.go | 31 +
vendor/gorm.io/gorm/clause/on_conflict.go | 59 +
vendor/gorm.io/gorm/clause/order_by.go | 54 +
vendor/gorm.io/gorm/clause/returning.go | 34 +
vendor/gorm.io/gorm/clause/select.go | 59 +
vendor/gorm.io/gorm/clause/set.go | 60 +
vendor/gorm.io/gorm/clause/update.go | 38 +
vendor/gorm.io/gorm/clause/values.go | 45 +
vendor/gorm.io/gorm/clause/where.go | 190 +
vendor/gorm.io/gorm/clause/with.go | 3 +
vendor/gorm.io/gorm/errors.go | 44 +
vendor/gorm.io/gorm/finisher_api.go | 667 +
vendor/gorm.io/gorm/gorm.go | 469 +
vendor/gorm.io/gorm/interfaces.go | 84 +
vendor/gorm.io/gorm/logger/logger.go | 202 +
vendor/gorm.io/gorm/logger/sql.go | 147 +
vendor/gorm.io/gorm/migrator.go | 93 +
vendor/gorm.io/gorm/migrator/column_type.go | 107 +
vendor/gorm.io/gorm/migrator/migrator.go | 842 +
vendor/gorm.io/gorm/model.go | 15 +
vendor/gorm.io/gorm/prepare_stmt.go | 172 +
vendor/gorm.io/gorm/scan.go | 290 +
vendor/gorm.io/gorm/schema/check.go | 35 +
vendor/gorm.io/gorm/schema/field.go | 972 +
vendor/gorm.io/gorm/schema/index.go | 142 +
vendor/gorm.io/gorm/schema/interfaces.go | 36 +
vendor/gorm.io/gorm/schema/naming.go | 181 +
vendor/gorm.io/gorm/schema/pool.go | 62 +
vendor/gorm.io/gorm/schema/relationship.go | 638 +
vendor/gorm.io/gorm/schema/schema.go | 323 +
vendor/gorm.io/gorm/schema/serializer.go | 157 +
vendor/gorm.io/gorm/schema/utils.go | 199 +
vendor/gorm.io/gorm/soft_delete.go | 157 +
vendor/gorm.io/gorm/statement.go | 712 +
vendor/gorm.io/gorm/utils/utils.go | 122 +
vendor/modules.txt | 276 +
1763 files changed, 755342 insertions(+), 5 deletions(-)
create mode 100644 Dockerfile
create mode 100644 Makefile
create mode 100644 cmd/garm-cli/cmd/version.go
create mode 100644 scripts/build-static.sh
create mode 100644 vendor/github.com/BurntSushi/toml/.gitignore
create mode 100644 vendor/github.com/BurntSushi/toml/COMPATIBLE
create mode 100644 vendor/github.com/BurntSushi/toml/COPYING
create mode 100644 vendor/github.com/BurntSushi/toml/README.md
create mode 100644 vendor/github.com/BurntSushi/toml/decode.go
create mode 100644 vendor/github.com/BurntSushi/toml/decode_go116.go
create mode 100644 vendor/github.com/BurntSushi/toml/decode_meta.go
create mode 100644 vendor/github.com/BurntSushi/toml/deprecated.go
create mode 100644 vendor/github.com/BurntSushi/toml/doc.go
create mode 100644 vendor/github.com/BurntSushi/toml/encode.go
create mode 100644 vendor/github.com/BurntSushi/toml/internal/tz.go
create mode 100644 vendor/github.com/BurntSushi/toml/lex.go
create mode 100644 vendor/github.com/BurntSushi/toml/parse.go
create mode 100644 vendor/github.com/BurntSushi/toml/type_check.go
create mode 100644 vendor/github.com/BurntSushi/toml/type_fields.go
create mode 100644 vendor/github.com/chzyer/readline/.gitignore
create mode 100644 vendor/github.com/chzyer/readline/.travis.yml
create mode 100644 vendor/github.com/chzyer/readline/CHANGELOG.md
create mode 100644 vendor/github.com/chzyer/readline/LICENSE
create mode 100644 vendor/github.com/chzyer/readline/README.md
create mode 100644 vendor/github.com/chzyer/readline/ansi_windows.go
create mode 100644 vendor/github.com/chzyer/readline/complete.go
create mode 100644 vendor/github.com/chzyer/readline/complete_helper.go
create mode 100644 vendor/github.com/chzyer/readline/complete_segment.go
create mode 100644 vendor/github.com/chzyer/readline/history.go
create mode 100644 vendor/github.com/chzyer/readline/operation.go
create mode 100644 vendor/github.com/chzyer/readline/password.go
create mode 100644 vendor/github.com/chzyer/readline/rawreader_windows.go
create mode 100644 vendor/github.com/chzyer/readline/readline.go
create mode 100644 vendor/github.com/chzyer/readline/remote.go
create mode 100644 vendor/github.com/chzyer/readline/runebuf.go
create mode 100644 vendor/github.com/chzyer/readline/runes.go
create mode 100644 vendor/github.com/chzyer/readline/search.go
create mode 100644 vendor/github.com/chzyer/readline/std.go
create mode 100644 vendor/github.com/chzyer/readline/std_windows.go
create mode 100644 vendor/github.com/chzyer/readline/term.go
create mode 100644 vendor/github.com/chzyer/readline/term_bsd.go
create mode 100644 vendor/github.com/chzyer/readline/term_linux.go
create mode 100644 vendor/github.com/chzyer/readline/term_solaris.go
create mode 100644 vendor/github.com/chzyer/readline/term_unix.go
create mode 100644 vendor/github.com/chzyer/readline/term_windows.go
create mode 100644 vendor/github.com/chzyer/readline/terminal.go
create mode 100644 vendor/github.com/chzyer/readline/utils.go
create mode 100644 vendor/github.com/chzyer/readline/utils_unix.go
create mode 100644 vendor/github.com/chzyer/readline/utils_windows.go
create mode 100644 vendor/github.com/chzyer/readline/vim.go
create mode 100644 vendor/github.com/chzyer/readline/windows_api.go
create mode 100644 vendor/github.com/davecgh/go-spew/LICENSE
create mode 100644 vendor/github.com/davecgh/go-spew/spew/bypass.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/common.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/config.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/doc.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/dump.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/format.go
create mode 100644 vendor/github.com/davecgh/go-spew/spew/spew.go
create mode 100644 vendor/github.com/felixge/httpsnoop/.gitignore
create mode 100644 vendor/github.com/felixge/httpsnoop/.travis.yml
create mode 100644 vendor/github.com/felixge/httpsnoop/LICENSE.txt
create mode 100644 vendor/github.com/felixge/httpsnoop/Makefile
create mode 100644 vendor/github.com/felixge/httpsnoop/README.md
create mode 100644 vendor/github.com/felixge/httpsnoop/capture_metrics.go
create mode 100644 vendor/github.com/felixge/httpsnoop/docs.go
create mode 100644 vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go
create mode 100644 vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go
create mode 100644 vendor/github.com/flosch/pongo2/.gitattributes
create mode 100644 vendor/github.com/flosch/pongo2/.gitignore
create mode 100644 vendor/github.com/flosch/pongo2/.travis.yml
create mode 100644 vendor/github.com/flosch/pongo2/AUTHORS
create mode 100644 vendor/github.com/flosch/pongo2/LICENSE
create mode 100644 vendor/github.com/flosch/pongo2/README.md
create mode 100644 vendor/github.com/flosch/pongo2/context.go
create mode 100644 vendor/github.com/flosch/pongo2/doc.go
create mode 100644 vendor/github.com/flosch/pongo2/error.go
create mode 100644 vendor/github.com/flosch/pongo2/filters.go
create mode 100644 vendor/github.com/flosch/pongo2/filters_builtin.go
create mode 100644 vendor/github.com/flosch/pongo2/helpers.go
create mode 100644 vendor/github.com/flosch/pongo2/lexer.go
create mode 100644 vendor/github.com/flosch/pongo2/nodes.go
create mode 100644 vendor/github.com/flosch/pongo2/nodes_html.go
create mode 100644 vendor/github.com/flosch/pongo2/nodes_wrapper.go
create mode 100644 vendor/github.com/flosch/pongo2/options.go
create mode 100644 vendor/github.com/flosch/pongo2/parser.go
create mode 100644 vendor/github.com/flosch/pongo2/parser_document.go
create mode 100644 vendor/github.com/flosch/pongo2/parser_expression.go
create mode 100644 vendor/github.com/flosch/pongo2/pongo2.go
create mode 100644 vendor/github.com/flosch/pongo2/tags.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_autoescape.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_block.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_comment.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_cycle.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_extends.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_filter.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_firstof.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_for.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_if.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_ifchanged.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_ifequal.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_ifnotequal.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_import.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_include.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_lorem.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_macro.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_now.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_set.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_spaceless.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_ssi.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_templatetag.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_widthratio.go
create mode 100644 vendor/github.com/flosch/pongo2/tags_with.go
create mode 100644 vendor/github.com/flosch/pongo2/template.go
create mode 100644 vendor/github.com/flosch/pongo2/template_loader.go
create mode 100644 vendor/github.com/flosch/pongo2/template_sets.go
create mode 100644 vendor/github.com/flosch/pongo2/value.go
create mode 100644 vendor/github.com/flosch/pongo2/variable.go
create mode 100644 vendor/github.com/go-macaroon-bakery/macaroonpb/LICENSE
create mode 100644 vendor/github.com/go-macaroon-bakery/macaroonpb/README.md
create mode 100644 vendor/github.com/go-macaroon-bakery/macaroonpb/id.go
create mode 100644 vendor/github.com/go-macaroon-bakery/macaroonpb/id.pb.go
create mode 100644 vendor/github.com/go-macaroon-bakery/macaroonpb/id.proto
create mode 100644 vendor/github.com/go-resty/resty/v2/.gitignore
create mode 100644 vendor/github.com/go-resty/resty/v2/BUILD.bazel
create mode 100644 vendor/github.com/go-resty/resty/v2/LICENSE
create mode 100644 vendor/github.com/go-resty/resty/v2/README.md
create mode 100644 vendor/github.com/go-resty/resty/v2/WORKSPACE
create mode 100644 vendor/github.com/go-resty/resty/v2/client.go
create mode 100644 vendor/github.com/go-resty/resty/v2/middleware.go
create mode 100644 vendor/github.com/go-resty/resty/v2/redirect.go
create mode 100644 vendor/github.com/go-resty/resty/v2/request.go
create mode 100644 vendor/github.com/go-resty/resty/v2/response.go
create mode 100644 vendor/github.com/go-resty/resty/v2/resty.go
create mode 100644 vendor/github.com/go-resty/resty/v2/retry.go
create mode 100644 vendor/github.com/go-resty/resty/v2/trace.go
create mode 100644 vendor/github.com/go-resty/resty/v2/transport.go
create mode 100644 vendor/github.com/go-resty/resty/v2/transport112.go
create mode 100644 vendor/github.com/go-resty/resty/v2/util.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/.gitignore
create mode 100644 vendor/github.com/go-sql-driver/mysql/AUTHORS
create mode 100644 vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
create mode 100644 vendor/github.com/go-sql-driver/mysql/LICENSE
create mode 100644 vendor/github.com/go-sql-driver/mysql/README.md
create mode 100644 vendor/github.com/go-sql-driver/mysql/auth.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/buffer.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/collations.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/conncheck.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/connection.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/connector.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/const.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/driver.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/dsn.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/errors.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/fields.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/fuzz.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/infile.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/nulltime.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/nulltime_go113.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/nulltime_legacy.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/packets.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/result.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/rows.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/statement.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/transaction.go
create mode 100644 vendor/github.com/go-sql-driver/mysql/utils.go
create mode 100644 vendor/github.com/golang-jwt/jwt/.gitignore
create mode 100644 vendor/github.com/golang-jwt/jwt/LICENSE
create mode 100644 vendor/github.com/golang-jwt/jwt/MIGRATION_GUIDE.md
create mode 100644 vendor/github.com/golang-jwt/jwt/README.md
create mode 100644 vendor/github.com/golang-jwt/jwt/VERSION_HISTORY.md
create mode 100644 vendor/github.com/golang-jwt/jwt/claims.go
create mode 100644 vendor/github.com/golang-jwt/jwt/doc.go
create mode 100644 vendor/github.com/golang-jwt/jwt/ecdsa.go
create mode 100644 vendor/github.com/golang-jwt/jwt/ecdsa_utils.go
create mode 100644 vendor/github.com/golang-jwt/jwt/ed25519.go
create mode 100644 vendor/github.com/golang-jwt/jwt/ed25519_utils.go
create mode 100644 vendor/github.com/golang-jwt/jwt/errors.go
create mode 100644 vendor/github.com/golang-jwt/jwt/hmac.go
create mode 100644 vendor/github.com/golang-jwt/jwt/map_claims.go
create mode 100644 vendor/github.com/golang-jwt/jwt/none.go
create mode 100644 vendor/github.com/golang-jwt/jwt/parser.go
create mode 100644 vendor/github.com/golang-jwt/jwt/rsa.go
create mode 100644 vendor/github.com/golang-jwt/jwt/rsa_pss.go
create mode 100644 vendor/github.com/golang-jwt/jwt/rsa_utils.go
create mode 100644 vendor/github.com/golang-jwt/jwt/signing_method.go
create mode 100644 vendor/github.com/golang-jwt/jwt/token.go
create mode 100644 vendor/github.com/golang/protobuf/AUTHORS
create mode 100644 vendor/github.com/golang/protobuf/CONTRIBUTORS
create mode 100644 vendor/github.com/golang/protobuf/LICENSE
create mode 100644 vendor/github.com/golang/protobuf/proto/buffer.go
create mode 100644 vendor/github.com/golang/protobuf/proto/defaults.go
create mode 100644 vendor/github.com/golang/protobuf/proto/deprecated.go
create mode 100644 vendor/github.com/golang/protobuf/proto/discard.go
create mode 100644 vendor/github.com/golang/protobuf/proto/extensions.go
create mode 100644 vendor/github.com/golang/protobuf/proto/properties.go
create mode 100644 vendor/github.com/golang/protobuf/proto/proto.go
create mode 100644 vendor/github.com/golang/protobuf/proto/registry.go
create mode 100644 vendor/github.com/golang/protobuf/proto/text_decode.go
create mode 100644 vendor/github.com/golang/protobuf/proto/text_encode.go
create mode 100644 vendor/github.com/golang/protobuf/proto/wire.go
create mode 100644 vendor/github.com/golang/protobuf/proto/wrappers.go
create mode 100644 vendor/github.com/google/go-github/v43/AUTHORS
create mode 100644 vendor/github.com/google/go-github/v43/LICENSE
create mode 100644 vendor/github.com/google/go-github/v43/github/actions.go
create mode 100644 vendor/github.com/google/go-github/v43/github/actions_artifacts.go
create mode 100644 vendor/github.com/google/go-github/v43/github/actions_runner_groups.go
create mode 100644 vendor/github.com/google/go-github/v43/github/actions_runners.go
create mode 100644 vendor/github.com/google/go-github/v43/github/actions_secrets.go
create mode 100644 vendor/github.com/google/go-github/v43/github/actions_workflow_jobs.go
create mode 100644 vendor/github.com/google/go-github/v43/github/actions_workflow_runs.go
create mode 100644 vendor/github.com/google/go-github/v43/github/actions_workflows.go
create mode 100644 vendor/github.com/google/go-github/v43/github/activity.go
create mode 100644 vendor/github.com/google/go-github/v43/github/activity_events.go
create mode 100644 vendor/github.com/google/go-github/v43/github/activity_notifications.go
create mode 100644 vendor/github.com/google/go-github/v43/github/activity_star.go
create mode 100644 vendor/github.com/google/go-github/v43/github/activity_watching.go
create mode 100644 vendor/github.com/google/go-github/v43/github/admin.go
create mode 100644 vendor/github.com/google/go-github/v43/github/admin_orgs.go
create mode 100644 vendor/github.com/google/go-github/v43/github/admin_stats.go
create mode 100644 vendor/github.com/google/go-github/v43/github/admin_users.go
create mode 100644 vendor/github.com/google/go-github/v43/github/apps.go
create mode 100644 vendor/github.com/google/go-github/v43/github/apps_hooks.go
create mode 100644 vendor/github.com/google/go-github/v43/github/apps_hooks_deliveries.go
create mode 100644 vendor/github.com/google/go-github/v43/github/apps_installation.go
create mode 100644 vendor/github.com/google/go-github/v43/github/apps_manifest.go
create mode 100644 vendor/github.com/google/go-github/v43/github/apps_marketplace.go
create mode 100644 vendor/github.com/google/go-github/v43/github/authorizations.go
create mode 100644 vendor/github.com/google/go-github/v43/github/billing.go
create mode 100644 vendor/github.com/google/go-github/v43/github/checks.go
create mode 100644 vendor/github.com/google/go-github/v43/github/code-scanning.go
create mode 100644 vendor/github.com/google/go-github/v43/github/dependabot.go
create mode 100644 vendor/github.com/google/go-github/v43/github/dependabot_secrets.go
create mode 100644 vendor/github.com/google/go-github/v43/github/doc.go
create mode 100644 vendor/github.com/google/go-github/v43/github/enterprise.go
create mode 100644 vendor/github.com/google/go-github/v43/github/enterprise_actions_runners.go
create mode 100644 vendor/github.com/google/go-github/v43/github/enterprise_audit_log.go
create mode 100644 vendor/github.com/google/go-github/v43/github/event.go
create mode 100644 vendor/github.com/google/go-github/v43/github/event_types.go
create mode 100644 vendor/github.com/google/go-github/v43/github/gists.go
create mode 100644 vendor/github.com/google/go-github/v43/github/gists_comments.go
create mode 100644 vendor/github.com/google/go-github/v43/github/git.go
create mode 100644 vendor/github.com/google/go-github/v43/github/git_blobs.go
create mode 100644 vendor/github.com/google/go-github/v43/github/git_commits.go
create mode 100644 vendor/github.com/google/go-github/v43/github/git_refs.go
create mode 100644 vendor/github.com/google/go-github/v43/github/git_tags.go
create mode 100644 vendor/github.com/google/go-github/v43/github/git_trees.go
create mode 100644 vendor/github.com/google/go-github/v43/github/github-accessors.go
create mode 100644 vendor/github.com/google/go-github/v43/github/github.go
create mode 100644 vendor/github.com/google/go-github/v43/github/gitignore.go
create mode 100644 vendor/github.com/google/go-github/v43/github/interactions.go
create mode 100644 vendor/github.com/google/go-github/v43/github/interactions_orgs.go
create mode 100644 vendor/github.com/google/go-github/v43/github/interactions_repos.go
create mode 100644 vendor/github.com/google/go-github/v43/github/issue_import.go
create mode 100644 vendor/github.com/google/go-github/v43/github/issues.go
create mode 100644 vendor/github.com/google/go-github/v43/github/issues_assignees.go
create mode 100644 vendor/github.com/google/go-github/v43/github/issues_comments.go
create mode 100644 vendor/github.com/google/go-github/v43/github/issues_events.go
create mode 100644 vendor/github.com/google/go-github/v43/github/issues_labels.go
create mode 100644 vendor/github.com/google/go-github/v43/github/issues_milestones.go
create mode 100644 vendor/github.com/google/go-github/v43/github/issues_timeline.go
create mode 100644 vendor/github.com/google/go-github/v43/github/licenses.go
create mode 100644 vendor/github.com/google/go-github/v43/github/messages.go
create mode 100644 vendor/github.com/google/go-github/v43/github/migrations.go
create mode 100644 vendor/github.com/google/go-github/v43/github/migrations_source_import.go
create mode 100644 vendor/github.com/google/go-github/v43/github/migrations_user.go
create mode 100644 vendor/github.com/google/go-github/v43/github/misc.go
create mode 100644 vendor/github.com/google/go-github/v43/github/orgs.go
create mode 100644 vendor/github.com/google/go-github/v43/github/orgs_actions_allowed.go
create mode 100644 vendor/github.com/google/go-github/v43/github/orgs_actions_permissions.go
create mode 100644 vendor/github.com/google/go-github/v43/github/orgs_audit_log.go
create mode 100644 vendor/github.com/google/go-github/v43/github/orgs_hooks.go
create mode 100644 vendor/github.com/google/go-github/v43/github/orgs_hooks_deliveries.go
create mode 100644 vendor/github.com/google/go-github/v43/github/orgs_members.go
create mode 100644 vendor/github.com/google/go-github/v43/github/orgs_outside_collaborators.go
create mode 100644 vendor/github.com/google/go-github/v43/github/orgs_packages.go
create mode 100644 vendor/github.com/google/go-github/v43/github/orgs_projects.go
create mode 100644 vendor/github.com/google/go-github/v43/github/orgs_users_blocking.go
create mode 100644 vendor/github.com/google/go-github/v43/github/packages.go
create mode 100644 vendor/github.com/google/go-github/v43/github/projects.go
create mode 100644 vendor/github.com/google/go-github/v43/github/pulls.go
create mode 100644 vendor/github.com/google/go-github/v43/github/pulls_comments.go
create mode 100644 vendor/github.com/google/go-github/v43/github/pulls_reviewers.go
create mode 100644 vendor/github.com/google/go-github/v43/github/pulls_reviews.go
create mode 100644 vendor/github.com/google/go-github/v43/github/reactions.go
create mode 100644 vendor/github.com/google/go-github/v43/github/repos.go
create mode 100644 vendor/github.com/google/go-github/v43/github/repos_autolinks.go
create mode 100644 vendor/github.com/google/go-github/v43/github/repos_collaborators.go
create mode 100644 vendor/github.com/google/go-github/v43/github/repos_comments.go
create mode 100644 vendor/github.com/google/go-github/v43/github/repos_commits.go
create mode 100644 vendor/github.com/google/go-github/v43/github/repos_community_health.go
create mode 100644 vendor/github.com/google/go-github/v43/github/repos_contents.go
create mode 100644 vendor/github.com/google/go-github/v43/github/repos_deployments.go
create mode 100644 vendor/github.com/google/go-github/v43/github/repos_environments.go
create mode 100644 vendor/github.com/google/go-github/v43/github/repos_forks.go
create mode 100644 vendor/github.com/google/go-github/v43/github/repos_hooks.go
create mode 100644 vendor/github.com/google/go-github/v43/github/repos_hooks_deliveries.go
create mode 100644 vendor/github.com/google/go-github/v43/github/repos_invitations.go
create mode 100644 vendor/github.com/google/go-github/v43/github/repos_keys.go
create mode 100644 vendor/github.com/google/go-github/v43/github/repos_merging.go
create mode 100644 vendor/github.com/google/go-github/v43/github/repos_pages.go
create mode 100644 vendor/github.com/google/go-github/v43/github/repos_prereceive_hooks.go
create mode 100644 vendor/github.com/google/go-github/v43/github/repos_projects.go
create mode 100644 vendor/github.com/google/go-github/v43/github/repos_releases.go
create mode 100644 vendor/github.com/google/go-github/v43/github/repos_stats.go
create mode 100644 vendor/github.com/google/go-github/v43/github/repos_statuses.go
create mode 100644 vendor/github.com/google/go-github/v43/github/repos_traffic.go
create mode 100644 vendor/github.com/google/go-github/v43/github/scim.go
create mode 100644 vendor/github.com/google/go-github/v43/github/search.go
create mode 100644 vendor/github.com/google/go-github/v43/github/secret_scanning.go
create mode 100644 vendor/github.com/google/go-github/v43/github/strings.go
create mode 100644 vendor/github.com/google/go-github/v43/github/teams.go
create mode 100644 vendor/github.com/google/go-github/v43/github/teams_discussion_comments.go
create mode 100644 vendor/github.com/google/go-github/v43/github/teams_discussions.go
create mode 100644 vendor/github.com/google/go-github/v43/github/teams_members.go
create mode 100644 vendor/github.com/google/go-github/v43/github/timestamp.go
create mode 100644 vendor/github.com/google/go-github/v43/github/users.go
create mode 100644 vendor/github.com/google/go-github/v43/github/users_administration.go
create mode 100644 vendor/github.com/google/go-github/v43/github/users_blocking.go
create mode 100644 vendor/github.com/google/go-github/v43/github/users_emails.go
create mode 100644 vendor/github.com/google/go-github/v43/github/users_followers.go
create mode 100644 vendor/github.com/google/go-github/v43/github/users_gpg_keys.go
create mode 100644 vendor/github.com/google/go-github/v43/github/users_keys.go
create mode 100644 vendor/github.com/google/go-github/v43/github/users_packages.go
create mode 100644 vendor/github.com/google/go-github/v43/github/users_projects.go
create mode 100644 vendor/github.com/google/go-github/v43/github/with_appengine.go
create mode 100644 vendor/github.com/google/go-github/v43/github/without_appengine.go
create mode 100644 vendor/github.com/google/go-querystring/LICENSE
create mode 100644 vendor/github.com/google/go-querystring/query/encode.go
create mode 100644 vendor/github.com/google/uuid/.travis.yml
create mode 100644 vendor/github.com/google/uuid/CONTRIBUTING.md
create mode 100644 vendor/github.com/google/uuid/CONTRIBUTORS
create mode 100644 vendor/github.com/google/uuid/LICENSE
create mode 100644 vendor/github.com/google/uuid/README.md
create mode 100644 vendor/github.com/google/uuid/dce.go
create mode 100644 vendor/github.com/google/uuid/doc.go
create mode 100644 vendor/github.com/google/uuid/hash.go
create mode 100644 vendor/github.com/google/uuid/marshal.go
create mode 100644 vendor/github.com/google/uuid/node.go
create mode 100644 vendor/github.com/google/uuid/node_js.go
create mode 100644 vendor/github.com/google/uuid/node_net.go
create mode 100644 vendor/github.com/google/uuid/null.go
create mode 100644 vendor/github.com/google/uuid/sql.go
create mode 100644 vendor/github.com/google/uuid/time.go
create mode 100644 vendor/github.com/google/uuid/util.go
create mode 100644 vendor/github.com/google/uuid/uuid.go
create mode 100644 vendor/github.com/google/uuid/version1.go
create mode 100644 vendor/github.com/google/uuid/version4.go
create mode 100644 vendor/github.com/gorilla/handlers/LICENSE
create mode 100644 vendor/github.com/gorilla/handlers/README.md
create mode 100644 vendor/github.com/gorilla/handlers/canonical.go
create mode 100644 vendor/github.com/gorilla/handlers/compress.go
create mode 100644 vendor/github.com/gorilla/handlers/cors.go
create mode 100644 vendor/github.com/gorilla/handlers/doc.go
create mode 100644 vendor/github.com/gorilla/handlers/handlers.go
create mode 100644 vendor/github.com/gorilla/handlers/logging.go
create mode 100644 vendor/github.com/gorilla/handlers/proxy_headers.go
create mode 100644 vendor/github.com/gorilla/handlers/recovery.go
create mode 100644 vendor/github.com/gorilla/mux/AUTHORS
create mode 100644 vendor/github.com/gorilla/mux/LICENSE
create mode 100644 vendor/github.com/gorilla/mux/README.md
create mode 100644 vendor/github.com/gorilla/mux/doc.go
create mode 100644 vendor/github.com/gorilla/mux/middleware.go
create mode 100644 vendor/github.com/gorilla/mux/mux.go
create mode 100644 vendor/github.com/gorilla/mux/regexp.go
create mode 100644 vendor/github.com/gorilla/mux/route.go
create mode 100644 vendor/github.com/gorilla/mux/test_helpers.go
create mode 100644 vendor/github.com/gorilla/websocket/.gitignore
create mode 100644 vendor/github.com/gorilla/websocket/AUTHORS
create mode 100644 vendor/github.com/gorilla/websocket/LICENSE
create mode 100644 vendor/github.com/gorilla/websocket/README.md
create mode 100644 vendor/github.com/gorilla/websocket/client.go
create mode 100644 vendor/github.com/gorilla/websocket/compression.go
create mode 100644 vendor/github.com/gorilla/websocket/conn.go
create mode 100644 vendor/github.com/gorilla/websocket/doc.go
create mode 100644 vendor/github.com/gorilla/websocket/join.go
create mode 100644 vendor/github.com/gorilla/websocket/json.go
create mode 100644 vendor/github.com/gorilla/websocket/mask.go
create mode 100644 vendor/github.com/gorilla/websocket/mask_safe.go
create mode 100644 vendor/github.com/gorilla/websocket/prepared.go
create mode 100644 vendor/github.com/gorilla/websocket/proxy.go
create mode 100644 vendor/github.com/gorilla/websocket/server.go
create mode 100644 vendor/github.com/gorilla/websocket/tls_handshake.go
create mode 100644 vendor/github.com/gorilla/websocket/tls_handshake_116.go
create mode 100644 vendor/github.com/gorilla/websocket/util.go
create mode 100644 vendor/github.com/gorilla/websocket/x_net_proxy.go
create mode 100644 vendor/github.com/inconshreveable/mousetrap/LICENSE
create mode 100644 vendor/github.com/inconshreveable/mousetrap/README.md
create mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_others.go
create mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_windows.go
create mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go
create mode 100644 vendor/github.com/jedib0t/go-pretty/v6/LICENSE
create mode 100644 vendor/github.com/jedib0t/go-pretty/v6/table/README.md
create mode 100644 vendor/github.com/jedib0t/go-pretty/v6/table/config.go
create mode 100644 vendor/github.com/jedib0t/go-pretty/v6/table/render.go
create mode 100644 vendor/github.com/jedib0t/go-pretty/v6/table/render_csv.go
create mode 100644 vendor/github.com/jedib0t/go-pretty/v6/table/render_html.go
create mode 100644 vendor/github.com/jedib0t/go-pretty/v6/table/render_markdown.go
create mode 100644 vendor/github.com/jedib0t/go-pretty/v6/table/sort.go
create mode 100644 vendor/github.com/jedib0t/go-pretty/v6/table/style.go
create mode 100644 vendor/github.com/jedib0t/go-pretty/v6/table/table.go
create mode 100644 vendor/github.com/jedib0t/go-pretty/v6/table/util.go
create mode 100644 vendor/github.com/jedib0t/go-pretty/v6/table/writer.go
create mode 100644 vendor/github.com/jedib0t/go-pretty/v6/text/align.go
create mode 100644 vendor/github.com/jedib0t/go-pretty/v6/text/ansi.go
create mode 100644 vendor/github.com/jedib0t/go-pretty/v6/text/ansi_unix.go
create mode 100644 vendor/github.com/jedib0t/go-pretty/v6/text/ansi_windows.go
create mode 100644 vendor/github.com/jedib0t/go-pretty/v6/text/color.go
create mode 100644 vendor/github.com/jedib0t/go-pretty/v6/text/color_html.go
create mode 100644 vendor/github.com/jedib0t/go-pretty/v6/text/cursor.go
create mode 100644 vendor/github.com/jedib0t/go-pretty/v6/text/filter.go
create mode 100644 vendor/github.com/jedib0t/go-pretty/v6/text/format.go
create mode 100644 vendor/github.com/jedib0t/go-pretty/v6/text/string.go
create mode 100644 vendor/github.com/jedib0t/go-pretty/v6/text/transformer.go
create mode 100644 vendor/github.com/jedib0t/go-pretty/v6/text/valign.go
create mode 100644 vendor/github.com/jedib0t/go-pretty/v6/text/wrap.go
create mode 100644 vendor/github.com/jinzhu/inflection/LICENSE
create mode 100644 vendor/github.com/jinzhu/inflection/README.md
create mode 100644 vendor/github.com/jinzhu/inflection/inflections.go
create mode 100644 vendor/github.com/jinzhu/inflection/wercker.yml
create mode 100644 vendor/github.com/jinzhu/now/Guardfile
create mode 100644 vendor/github.com/jinzhu/now/License
create mode 100644 vendor/github.com/jinzhu/now/README.md
create mode 100644 vendor/github.com/jinzhu/now/main.go
create mode 100644 vendor/github.com/jinzhu/now/now.go
create mode 100644 vendor/github.com/jinzhu/now/time.go
create mode 100644 vendor/github.com/juju/webbrowser/.gitignore
create mode 100644 vendor/github.com/juju/webbrowser/LICENSE
create mode 100644 vendor/github.com/juju/webbrowser/README.md
create mode 100644 vendor/github.com/juju/webbrowser/webbrowser.go
create mode 100644 vendor/github.com/julienschmidt/httprouter/.travis.yml
create mode 100644 vendor/github.com/julienschmidt/httprouter/LICENSE
create mode 100644 vendor/github.com/julienschmidt/httprouter/README.md
create mode 100644 vendor/github.com/julienschmidt/httprouter/path.go
create mode 100644 vendor/github.com/julienschmidt/httprouter/router.go
create mode 100644 vendor/github.com/julienschmidt/httprouter/tree.go
create mode 100644 vendor/github.com/kballard/go-shellquote/LICENSE
create mode 100644 vendor/github.com/kballard/go-shellquote/README
create mode 100644 vendor/github.com/kballard/go-shellquote/doc.go
create mode 100644 vendor/github.com/kballard/go-shellquote/quote.go
create mode 100644 vendor/github.com/kballard/go-shellquote/unquote.go
create mode 100644 vendor/github.com/kr/fs/LICENSE
create mode 100644 vendor/github.com/kr/fs/Readme
create mode 100644 vendor/github.com/kr/fs/filesystem.go
create mode 100644 vendor/github.com/kr/fs/walk.go
create mode 100644 vendor/github.com/lxc/lxd/AUTHORS
create mode 100644 vendor/github.com/lxc/lxd/COPYING
create mode 100644 vendor/github.com/lxc/lxd/client/connection.go
create mode 100644 vendor/github.com/lxc/lxd/client/doc.go
create mode 100644 vendor/github.com/lxc/lxd/client/events.go
create mode 100644 vendor/github.com/lxc/lxd/client/interfaces.go
create mode 100644 vendor/github.com/lxc/lxd/client/interfaces_legacy.go
create mode 100644 vendor/github.com/lxc/lxd/client/lxd.go
create mode 100644 vendor/github.com/lxc/lxd/client/lxd_certificates.go
create mode 100644 vendor/github.com/lxc/lxd/client/lxd_cluster.go
create mode 100644 vendor/github.com/lxc/lxd/client/lxd_containers.go
create mode 100644 vendor/github.com/lxc/lxd/client/lxd_events.go
create mode 100644 vendor/github.com/lxc/lxd/client/lxd_images.go
create mode 100644 vendor/github.com/lxc/lxd/client/lxd_instances.go
create mode 100644 vendor/github.com/lxc/lxd/client/lxd_network_acls.go
create mode 100644 vendor/github.com/lxc/lxd/client/lxd_network_forwards.go
create mode 100644 vendor/github.com/lxc/lxd/client/lxd_network_peer.go
create mode 100644 vendor/github.com/lxc/lxd/client/lxd_network_zones.go
create mode 100644 vendor/github.com/lxc/lxd/client/lxd_networks.go
create mode 100644 vendor/github.com/lxc/lxd/client/lxd_operations.go
create mode 100644 vendor/github.com/lxc/lxd/client/lxd_profiles.go
create mode 100644 vendor/github.com/lxc/lxd/client/lxd_projects.go
create mode 100644 vendor/github.com/lxc/lxd/client/lxd_server.go
create mode 100644 vendor/github.com/lxc/lxd/client/lxd_storage_pools.go
create mode 100644 vendor/github.com/lxc/lxd/client/lxd_storage_volumes.go
create mode 100644 vendor/github.com/lxc/lxd/client/lxd_warnings.go
create mode 100644 vendor/github.com/lxc/lxd/client/operations.go
create mode 100644 vendor/github.com/lxc/lxd/client/simplestreams.go
create mode 100644 vendor/github.com/lxc/lxd/client/simplestreams_images.go
create mode 100644 vendor/github.com/lxc/lxd/client/util.go
create mode 100644 vendor/github.com/lxc/lxd/lxd/device/config/consts.go
create mode 100644 vendor/github.com/lxc/lxd/lxd/device/config/device_proxyaddress.go
create mode 100644 vendor/github.com/lxc/lxd/lxd/device/config/device_runconfig.go
create mode 100644 vendor/github.com/lxc/lxd/lxd/device/config/devices.go
create mode 100644 vendor/github.com/lxc/lxd/lxd/device/config/devices_sort.go
create mode 100644 vendor/github.com/lxc/lxd/lxd/device/config/devices_utils.go
create mode 100644 vendor/github.com/lxc/lxd/lxd/include/compiler.h
create mode 100644 vendor/github.com/lxc/lxd/lxd/include/config.h
create mode 100644 vendor/github.com/lxc/lxd/lxd/include/dummy.go
create mode 100644 vendor/github.com/lxc/lxd/lxd/include/error_utils.h
create mode 100644 vendor/github.com/lxc/lxd/lxd/include/file_utils.h
create mode 100644 vendor/github.com/lxc/lxd/lxd/include/lxd.h
create mode 100644 vendor/github.com/lxc/lxd/lxd/include/lxd_bpf.h
create mode 100644 vendor/github.com/lxc/lxd/lxd/include/lxd_bpf_common.h
create mode 100644 vendor/github.com/lxc/lxd/lxd/include/lxd_posix_acl_xattr.h
create mode 100644 vendor/github.com/lxc/lxd/lxd/include/lxd_seccomp.h
create mode 100644 vendor/github.com/lxc/lxd/lxd/include/macro.h
create mode 100644 vendor/github.com/lxc/lxd/lxd/include/memory_utils.h
create mode 100644 vendor/github.com/lxc/lxd/lxd/include/mount_utils.h
create mode 100644 vendor/github.com/lxc/lxd/lxd/include/process_utils.h
create mode 100644 vendor/github.com/lxc/lxd/lxd/include/syscall_numbers.h
create mode 100644 vendor/github.com/lxc/lxd/lxd/include/syscall_wrappers.h
create mode 100644 vendor/github.com/lxc/lxd/lxd/instance/instancetype/instance_type.go
create mode 100644 vendor/github.com/lxc/lxd/lxd/instance/instancetype/instance_vmagent.go
create mode 100644 vendor/github.com/lxc/lxd/lxd/revert/revert.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/certificate.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/cluster.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/container.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/container_backup.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/container_console.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/container_exec.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/container_snapshot.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/container_state.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/doc.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/error.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/event.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/image.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/instance.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/instance_backup.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/instance_console.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/instance_exec.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/instance_snapshot.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/instance_state.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/network.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/network_acl.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/network_forward.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/network_peer.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/network_zone.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/operation.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/profile.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/project.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/resource.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/response.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/server.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/status_code.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/storage_pool.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/storage_pool_volume.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/storage_pool_volume_backup.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/storage_pool_volume_snapshot.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/storage_pool_volume_state.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/url.go
create mode 100644 vendor/github.com/lxc/lxd/shared/api/warning.go
create mode 100644 vendor/github.com/lxc/lxd/shared/archive.go
create mode 100644 vendor/github.com/lxc/lxd/shared/cancel/canceler.go
create mode 100644 vendor/github.com/lxc/lxd/shared/cert.go
create mode 100644 vendor/github.com/lxc/lxd/shared/cgo.go
create mode 100644 vendor/github.com/lxc/lxd/shared/instance.go
create mode 100644 vendor/github.com/lxc/lxd/shared/ioprogress/data.go
create mode 100644 vendor/github.com/lxc/lxd/shared/ioprogress/reader.go
create mode 100644 vendor/github.com/lxc/lxd/shared/ioprogress/tracker.go
create mode 100644 vendor/github.com/lxc/lxd/shared/ioprogress/writer.go
create mode 100644 vendor/github.com/lxc/lxd/shared/json.go
create mode 100644 vendor/github.com/lxc/lxd/shared/logger/format.go
create mode 100644 vendor/github.com/lxc/lxd/shared/logger/log.go
create mode 100644 vendor/github.com/lxc/lxd/shared/logger/syslog_linux.go
create mode 100644 vendor/github.com/lxc/lxd/shared/logger/syslog_other.go
create mode 100644 vendor/github.com/lxc/lxd/shared/logger/toplevel.go
create mode 100644 vendor/github.com/lxc/lxd/shared/logger/types.go
create mode 100644 vendor/github.com/lxc/lxd/shared/logger/wrapper.go
create mode 100644 vendor/github.com/lxc/lxd/shared/network.go
create mode 100644 vendor/github.com/lxc/lxd/shared/network_ip.go
create mode 100644 vendor/github.com/lxc/lxd/shared/network_unix.go
create mode 100644 vendor/github.com/lxc/lxd/shared/network_windows.go
create mode 100644 vendor/github.com/lxc/lxd/shared/osarch/architectures.go
create mode 100644 vendor/github.com/lxc/lxd/shared/osarch/architectures_linux.go
create mode 100644 vendor/github.com/lxc/lxd/shared/osarch/architectures_others.go
create mode 100644 vendor/github.com/lxc/lxd/shared/osarch/release.go
create mode 100644 vendor/github.com/lxc/lxd/shared/proxy.go
create mode 100644 vendor/github.com/lxc/lxd/shared/simplestreams/index.go
create mode 100644 vendor/github.com/lxc/lxd/shared/simplestreams/products.go
create mode 100644 vendor/github.com/lxc/lxd/shared/simplestreams/simplestreams.go
create mode 100644 vendor/github.com/lxc/lxd/shared/simplestreams/sort.go
create mode 100644 vendor/github.com/lxc/lxd/shared/tcp/tcp_timeout_user.go
create mode 100644 vendor/github.com/lxc/lxd/shared/tcp/tcp_timeout_user_noop.go
create mode 100644 vendor/github.com/lxc/lxd/shared/tcp/tcp_timeouts.go
create mode 100644 vendor/github.com/lxc/lxd/shared/termios/termios.go
create mode 100644 vendor/github.com/lxc/lxd/shared/termios/termios_linux.go
create mode 100644 vendor/github.com/lxc/lxd/shared/termios/termios_other.go
create mode 100644 vendor/github.com/lxc/lxd/shared/units/units.go
create mode 100644 vendor/github.com/lxc/lxd/shared/util.go
create mode 100644 vendor/github.com/lxc/lxd/shared/util_linux.go
create mode 100644 vendor/github.com/lxc/lxd/shared/util_linux_cgo.go
create mode 100644 vendor/github.com/lxc/lxd/shared/util_linux_notcgo.go
create mode 100644 vendor/github.com/lxc/lxd/shared/util_unix.go
create mode 100644 vendor/github.com/lxc/lxd/shared/util_windows.go
create mode 100644 vendor/github.com/lxc/lxd/shared/validate/validate.go
create mode 100644 vendor/github.com/manifoldco/promptui/.gitignore
create mode 100644 vendor/github.com/manifoldco/promptui/.golangci.yml
create mode 100644 vendor/github.com/manifoldco/promptui/.travis.yml
create mode 100644 vendor/github.com/manifoldco/promptui/CHANGELOG.md
create mode 100644 vendor/github.com/manifoldco/promptui/CODE_OF_CONDUCT.md
create mode 100644 vendor/github.com/manifoldco/promptui/LICENSE.md
create mode 100644 vendor/github.com/manifoldco/promptui/Makefile
create mode 100644 vendor/github.com/manifoldco/promptui/README.md
create mode 100644 vendor/github.com/manifoldco/promptui/codes.go
create mode 100644 vendor/github.com/manifoldco/promptui/cursor.go
create mode 100644 vendor/github.com/manifoldco/promptui/keycodes.go
create mode 100644 vendor/github.com/manifoldco/promptui/keycodes_other.go
create mode 100644 vendor/github.com/manifoldco/promptui/keycodes_windows.go
create mode 100644 vendor/github.com/manifoldco/promptui/list/list.go
create mode 100644 vendor/github.com/manifoldco/promptui/prompt.go
create mode 100644 vendor/github.com/manifoldco/promptui/promptui.go
create mode 100644 vendor/github.com/manifoldco/promptui/screenbuf/screenbuf.go
create mode 100644 vendor/github.com/manifoldco/promptui/select.go
create mode 100644 vendor/github.com/manifoldco/promptui/styles.go
create mode 100644 vendor/github.com/manifoldco/promptui/styles_windows.go
create mode 100644 vendor/github.com/mattn/go-runewidth/.travis.yml
create mode 100644 vendor/github.com/mattn/go-runewidth/LICENSE
create mode 100644 vendor/github.com/mattn/go-runewidth/README.md
create mode 100644 vendor/github.com/mattn/go-runewidth/go.test.sh
create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth.go
create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth_appengine.go
create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth_js.go
create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth_posix.go
create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth_table.go
create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth_windows.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/.codecov.yml
create mode 100644 vendor/github.com/mattn/go-sqlite3/.gitignore
create mode 100644 vendor/github.com/mattn/go-sqlite3/LICENSE
create mode 100644 vendor/github.com/mattn/go-sqlite3/README.md
create mode 100644 vendor/github.com/mattn/go-sqlite3/backup.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/callback.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/convert.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/doc.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/error.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_context.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_func_crypt.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_go18.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_libsqlite3.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension_omit.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_allow_uri_authority.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_app_armor.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_column_metadata.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_foreign_keys.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_fts5.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_icu.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_introspect.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_json1.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate_hook.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate_omit.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_secure_delete.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_secure_delete_fast.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_stat4.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.c
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth_omit.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vacuum_full.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vacuum_incr.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vtable.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_other.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_solaris.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_trace.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_type.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_usleep_windows.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3_windows.go
create mode 100644 vendor/github.com/mattn/go-sqlite3/sqlite3ext.h
create mode 100644 vendor/github.com/mattn/go-sqlite3/static_mock.go
create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/.gitignore
create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/LICENSE.txt
create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/Makefile
create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/README.md
create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/adjacency/adjcmartix.go
create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/data/bindata.go
create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/entropy/entropyCalculator.go
create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/frequency/frequency.go
create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/match/match.go
create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/matching/dateMatchers.go
create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/matching/dictionaryMatch.go
create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/matching/leet.go
create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/matching/matching.go
create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/matching/repeatMatch.go
create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/matching/sequenceMatch.go
create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/matching/spatialMatch.go
create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/scoring/scoring.go
create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/utils/math/mathutils.go
create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/zxcvbn.go
create mode 100644 vendor/github.com/pborman/uuid/.travis.yml
create mode 100644 vendor/github.com/pborman/uuid/CONTRIBUTING.md
create mode 100644 vendor/github.com/pborman/uuid/CONTRIBUTORS
create mode 100644 vendor/github.com/pborman/uuid/LICENSE
create mode 100644 vendor/github.com/pborman/uuid/README.md
create mode 100644 vendor/github.com/pborman/uuid/dce.go
create mode 100644 vendor/github.com/pborman/uuid/doc.go
create mode 100644 vendor/github.com/pborman/uuid/hash.go
create mode 100644 vendor/github.com/pborman/uuid/marshal.go
create mode 100644 vendor/github.com/pborman/uuid/node.go
create mode 100644 vendor/github.com/pborman/uuid/sql.go
create mode 100644 vendor/github.com/pborman/uuid/time.go
create mode 100644 vendor/github.com/pborman/uuid/util.go
create mode 100644 vendor/github.com/pborman/uuid/uuid.go
create mode 100644 vendor/github.com/pborman/uuid/version1.go
create mode 100644 vendor/github.com/pborman/uuid/version4.go
create mode 100644 vendor/github.com/pkg/errors/.gitignore
create mode 100644 vendor/github.com/pkg/errors/.travis.yml
create mode 100644 vendor/github.com/pkg/errors/LICENSE
create mode 100644 vendor/github.com/pkg/errors/Makefile
create mode 100644 vendor/github.com/pkg/errors/README.md
create mode 100644 vendor/github.com/pkg/errors/appveyor.yml
create mode 100644 vendor/github.com/pkg/errors/errors.go
create mode 100644 vendor/github.com/pkg/errors/go113.go
create mode 100644 vendor/github.com/pkg/errors/stack.go
create mode 100644 vendor/github.com/pkg/sftp/.gitignore
create mode 100644 vendor/github.com/pkg/sftp/CONTRIBUTORS
create mode 100644 vendor/github.com/pkg/sftp/LICENSE
create mode 100644 vendor/github.com/pkg/sftp/Makefile
create mode 100644 vendor/github.com/pkg/sftp/README.md
create mode 100644 vendor/github.com/pkg/sftp/allocator.go
create mode 100644 vendor/github.com/pkg/sftp/attrs.go
create mode 100644 vendor/github.com/pkg/sftp/attrs_stubs.go
create mode 100644 vendor/github.com/pkg/sftp/attrs_unix.go
create mode 100644 vendor/github.com/pkg/sftp/client.go
create mode 100644 vendor/github.com/pkg/sftp/conn.go
create mode 100644 vendor/github.com/pkg/sftp/debug.go
create mode 100644 vendor/github.com/pkg/sftp/fuzz.go
create mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/attrs.go
create mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/buffer.go
create mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extended_packets.go
create mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extensions.go
create mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/filexfer.go
create mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fx.go
create mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fxp.go
create mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/handle_packets.go
create mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/init_packets.go
create mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/open_packets.go
create mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/packets.go
create mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/path_packets.go
create mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/permissions.go
create mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/response_packets.go
create mode 100644 vendor/github.com/pkg/sftp/ls_formatting.go
create mode 100644 vendor/github.com/pkg/sftp/ls_plan9.go
create mode 100644 vendor/github.com/pkg/sftp/ls_stub.go
create mode 100644 vendor/github.com/pkg/sftp/ls_unix.go
create mode 100644 vendor/github.com/pkg/sftp/match.go
create mode 100644 vendor/github.com/pkg/sftp/packet-manager.go
create mode 100644 vendor/github.com/pkg/sftp/packet-typing.go
create mode 100644 vendor/github.com/pkg/sftp/packet.go
create mode 100644 vendor/github.com/pkg/sftp/pool.go
create mode 100644 vendor/github.com/pkg/sftp/release.go
create mode 100644 vendor/github.com/pkg/sftp/request-attrs.go
create mode 100644 vendor/github.com/pkg/sftp/request-errors.go
create mode 100644 vendor/github.com/pkg/sftp/request-example.go
create mode 100644 vendor/github.com/pkg/sftp/request-interfaces.go
create mode 100644 vendor/github.com/pkg/sftp/request-plan9.go
create mode 100644 vendor/github.com/pkg/sftp/request-readme.md
create mode 100644 vendor/github.com/pkg/sftp/request-server.go
create mode 100644 vendor/github.com/pkg/sftp/request-unix.go
create mode 100644 vendor/github.com/pkg/sftp/request.go
create mode 100644 vendor/github.com/pkg/sftp/request_windows.go
create mode 100644 vendor/github.com/pkg/sftp/server.go
create mode 100644 vendor/github.com/pkg/sftp/server_statvfs_darwin.go
create mode 100644 vendor/github.com/pkg/sftp/server_statvfs_impl.go
create mode 100644 vendor/github.com/pkg/sftp/server_statvfs_linux.go
create mode 100644 vendor/github.com/pkg/sftp/server_statvfs_plan9.go
create mode 100644 vendor/github.com/pkg/sftp/server_statvfs_stubs.go
create mode 100644 vendor/github.com/pkg/sftp/sftp.go
create mode 100644 vendor/github.com/pkg/sftp/stat_plan9.go
create mode 100644 vendor/github.com/pkg/sftp/stat_posix.go
create mode 100644 vendor/github.com/pkg/sftp/syscall_fixed.go
create mode 100644 vendor/github.com/pkg/sftp/syscall_good.go
create mode 100644 vendor/github.com/pmezard/go-difflib/LICENSE
create mode 100644 vendor/github.com/pmezard/go-difflib/difflib/difflib.go
create mode 100644 vendor/github.com/rivo/uniseg/LICENSE.txt
create mode 100644 vendor/github.com/rivo/uniseg/README.md
create mode 100644 vendor/github.com/rivo/uniseg/doc.go
create mode 100644 vendor/github.com/rivo/uniseg/grapheme.go
create mode 100644 vendor/github.com/rivo/uniseg/properties.go
create mode 100644 vendor/github.com/robfig/cron/v3/.gitignore
create mode 100644 vendor/github.com/robfig/cron/v3/.travis.yml
create mode 100644 vendor/github.com/robfig/cron/v3/LICENSE
create mode 100644 vendor/github.com/robfig/cron/v3/README.md
create mode 100644 vendor/github.com/robfig/cron/v3/chain.go
create mode 100644 vendor/github.com/robfig/cron/v3/constantdelay.go
create mode 100644 vendor/github.com/robfig/cron/v3/cron.go
create mode 100644 vendor/github.com/robfig/cron/v3/doc.go
create mode 100644 vendor/github.com/robfig/cron/v3/logger.go
create mode 100644 vendor/github.com/robfig/cron/v3/option.go
create mode 100644 vendor/github.com/robfig/cron/v3/parser.go
create mode 100644 vendor/github.com/robfig/cron/v3/spec.go
create mode 100644 vendor/github.com/rogpeppe/fastuuid/LICENSE
create mode 100644 vendor/github.com/rogpeppe/fastuuid/README.md
create mode 100644 vendor/github.com/rogpeppe/fastuuid/uuid.go
create mode 100644 vendor/github.com/satori/go.uuid/.travis.yml
create mode 100644 vendor/github.com/satori/go.uuid/LICENSE
create mode 100644 vendor/github.com/satori/go.uuid/README.md
create mode 100644 vendor/github.com/satori/go.uuid/codec.go
create mode 100644 vendor/github.com/satori/go.uuid/generator.go
create mode 100644 vendor/github.com/satori/go.uuid/sql.go
create mode 100644 vendor/github.com/satori/go.uuid/uuid.go
create mode 100644 vendor/github.com/sirupsen/logrus/.gitignore
create mode 100644 vendor/github.com/sirupsen/logrus/.golangci.yml
create mode 100644 vendor/github.com/sirupsen/logrus/.travis.yml
create mode 100644 vendor/github.com/sirupsen/logrus/CHANGELOG.md
create mode 100644 vendor/github.com/sirupsen/logrus/LICENSE
create mode 100644 vendor/github.com/sirupsen/logrus/README.md
create mode 100644 vendor/github.com/sirupsen/logrus/alt_exit.go
create mode 100644 vendor/github.com/sirupsen/logrus/appveyor.yml
create mode 100644 vendor/github.com/sirupsen/logrus/buffer_pool.go
create mode 100644 vendor/github.com/sirupsen/logrus/doc.go
create mode 100644 vendor/github.com/sirupsen/logrus/entry.go
create mode 100644 vendor/github.com/sirupsen/logrus/exported.go
create mode 100644 vendor/github.com/sirupsen/logrus/formatter.go
create mode 100644 vendor/github.com/sirupsen/logrus/hooks.go
create mode 100644 vendor/github.com/sirupsen/logrus/hooks/syslog/README.md
create mode 100644 vendor/github.com/sirupsen/logrus/hooks/syslog/syslog.go
create mode 100644 vendor/github.com/sirupsen/logrus/hooks/writer/README.md
create mode 100644 vendor/github.com/sirupsen/logrus/hooks/writer/writer.go
create mode 100644 vendor/github.com/sirupsen/logrus/json_formatter.go
create mode 100644 vendor/github.com/sirupsen/logrus/logger.go
create mode 100644 vendor/github.com/sirupsen/logrus/logrus.go
create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_bsd.go
create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_js.go
create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go
create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_solaris.go
create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_unix.go
create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_windows.go
create mode 100644 vendor/github.com/sirupsen/logrus/text_formatter.go
create mode 100644 vendor/github.com/sirupsen/logrus/writer.go
create mode 100644 vendor/github.com/spf13/cobra/.gitignore
create mode 100644 vendor/github.com/spf13/cobra/.golangci.yml
create mode 100644 vendor/github.com/spf13/cobra/.mailmap
create mode 100644 vendor/github.com/spf13/cobra/CONDUCT.md
create mode 100644 vendor/github.com/spf13/cobra/CONTRIBUTING.md
create mode 100644 vendor/github.com/spf13/cobra/LICENSE.txt
create mode 100644 vendor/github.com/spf13/cobra/MAINTAINERS
create mode 100644 vendor/github.com/spf13/cobra/Makefile
create mode 100644 vendor/github.com/spf13/cobra/README.md
create mode 100644 vendor/github.com/spf13/cobra/args.go
create mode 100644 vendor/github.com/spf13/cobra/bash_completions.go
create mode 100644 vendor/github.com/spf13/cobra/bash_completions.md
create mode 100644 vendor/github.com/spf13/cobra/bash_completionsV2.go
create mode 100644 vendor/github.com/spf13/cobra/cobra.go
create mode 100644 vendor/github.com/spf13/cobra/command.go
create mode 100644 vendor/github.com/spf13/cobra/command_notwin.go
create mode 100644 vendor/github.com/spf13/cobra/command_win.go
create mode 100644 vendor/github.com/spf13/cobra/completions.go
create mode 100644 vendor/github.com/spf13/cobra/fish_completions.go
create mode 100644 vendor/github.com/spf13/cobra/fish_completions.md
create mode 100644 vendor/github.com/spf13/cobra/flag_groups.go
create mode 100644 vendor/github.com/spf13/cobra/powershell_completions.go
create mode 100644 vendor/github.com/spf13/cobra/powershell_completions.md
create mode 100644 vendor/github.com/spf13/cobra/projects_using_cobra.md
create mode 100644 vendor/github.com/spf13/cobra/shell_completions.go
create mode 100644 vendor/github.com/spf13/cobra/shell_completions.md
create mode 100644 vendor/github.com/spf13/cobra/user_guide.md
create mode 100644 vendor/github.com/spf13/cobra/zsh_completions.go
create mode 100644 vendor/github.com/spf13/cobra/zsh_completions.md
create mode 100644 vendor/github.com/spf13/pflag/.gitignore
create mode 100644 vendor/github.com/spf13/pflag/.travis.yml
create mode 100644 vendor/github.com/spf13/pflag/LICENSE
create mode 100644 vendor/github.com/spf13/pflag/README.md
create mode 100644 vendor/github.com/spf13/pflag/bool.go
create mode 100644 vendor/github.com/spf13/pflag/bool_slice.go
create mode 100644 vendor/github.com/spf13/pflag/bytes.go
create mode 100644 vendor/github.com/spf13/pflag/count.go
create mode 100644 vendor/github.com/spf13/pflag/duration.go
create mode 100644 vendor/github.com/spf13/pflag/duration_slice.go
create mode 100644 vendor/github.com/spf13/pflag/flag.go
create mode 100644 vendor/github.com/spf13/pflag/float32.go
create mode 100644 vendor/github.com/spf13/pflag/float32_slice.go
create mode 100644 vendor/github.com/spf13/pflag/float64.go
create mode 100644 vendor/github.com/spf13/pflag/float64_slice.go
create mode 100644 vendor/github.com/spf13/pflag/golangflag.go
create mode 100644 vendor/github.com/spf13/pflag/int.go
create mode 100644 vendor/github.com/spf13/pflag/int16.go
create mode 100644 vendor/github.com/spf13/pflag/int32.go
create mode 100644 vendor/github.com/spf13/pflag/int32_slice.go
create mode 100644 vendor/github.com/spf13/pflag/int64.go
create mode 100644 vendor/github.com/spf13/pflag/int64_slice.go
create mode 100644 vendor/github.com/spf13/pflag/int8.go
create mode 100644 vendor/github.com/spf13/pflag/int_slice.go
create mode 100644 vendor/github.com/spf13/pflag/ip.go
create mode 100644 vendor/github.com/spf13/pflag/ip_slice.go
create mode 100644 vendor/github.com/spf13/pflag/ipmask.go
create mode 100644 vendor/github.com/spf13/pflag/ipnet.go
create mode 100644 vendor/github.com/spf13/pflag/string.go
create mode 100644 vendor/github.com/spf13/pflag/string_array.go
create mode 100644 vendor/github.com/spf13/pflag/string_slice.go
create mode 100644 vendor/github.com/spf13/pflag/string_to_int.go
create mode 100644 vendor/github.com/spf13/pflag/string_to_int64.go
create mode 100644 vendor/github.com/spf13/pflag/string_to_string.go
create mode 100644 vendor/github.com/spf13/pflag/uint.go
create mode 100644 vendor/github.com/spf13/pflag/uint16.go
create mode 100644 vendor/github.com/spf13/pflag/uint32.go
create mode 100644 vendor/github.com/spf13/pflag/uint64.go
create mode 100644 vendor/github.com/spf13/pflag/uint8.go
create mode 100644 vendor/github.com/spf13/pflag/uint_slice.go
create mode 100644 vendor/github.com/stretchr/testify/LICENSE
create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_compare.go
create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go
create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_format.go
create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl
create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_forward.go
create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl
create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_order.go
create mode 100644 vendor/github.com/stretchr/testify/assert/assertions.go
create mode 100644 vendor/github.com/stretchr/testify/assert/doc.go
create mode 100644 vendor/github.com/stretchr/testify/assert/errors.go
create mode 100644 vendor/github.com/stretchr/testify/assert/forward_assertions.go
create mode 100644 vendor/github.com/stretchr/testify/assert/http_assertions.go
create mode 100644 vendor/golang.org/x/crypto/AUTHORS
create mode 100644 vendor/golang.org/x/crypto/CONTRIBUTORS
create mode 100644 vendor/golang.org/x/crypto/LICENSE
create mode 100644 vendor/golang.org/x/crypto/PATENTS
create mode 100644 vendor/golang.org/x/crypto/bcrypt/base64.go
create mode 100644 vendor/golang.org/x/crypto/bcrypt/bcrypt.go
create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b.go
create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go
create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s
create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go
create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s
create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_generic.go
create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_ref.go
create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2x.go
create mode 100644 vendor/golang.org/x/crypto/blake2b/register.go
create mode 100644 vendor/golang.org/x/crypto/blowfish/block.go
create mode 100644 vendor/golang.org/x/crypto/blowfish/cipher.go
create mode 100644 vendor/golang.org/x/crypto/blowfish/const.go
create mode 100644 vendor/golang.org/x/crypto/cast5/cast5.go
create mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_arm64.go
create mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_arm64.s
create mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_generic.go
create mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_noasm.go
create mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go
create mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s
create mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_s390x.go
create mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_s390x.s
create mode 100644 vendor/golang.org/x/crypto/chacha20/xor.go
create mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519.go
create mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/README
create mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe.go
create mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go
create mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s
create mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go
create mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go
create mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s
create mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go
create mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go
create mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/sync.checkpoint
create mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/sync.sh
create mode 100644 vendor/golang.org/x/crypto/ed25519/ed25519.go
create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go
create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go
create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go
create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/poly1305.go
create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go
create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s
create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go
create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go
create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s
create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go
create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s
create mode 100644 vendor/golang.org/x/crypto/internal/subtle/aliasing.go
create mode 100644 vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go
create mode 100644 vendor/golang.org/x/crypto/nacl/box/box.go
create mode 100644 vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go
create mode 100644 vendor/golang.org/x/crypto/openpgp/armor/armor.go
create mode 100644 vendor/golang.org/x/crypto/openpgp/armor/encode.go
create mode 100644 vendor/golang.org/x/crypto/openpgp/canonical_text.go
create mode 100644 vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go
create mode 100644 vendor/golang.org/x/crypto/openpgp/errors/errors.go
create mode 100644 vendor/golang.org/x/crypto/openpgp/keys.go
create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/compressed.go
create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/config.go
create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go
create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/literal.go
create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/ocfb.go
create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go
create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/opaque.go
create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/packet.go
create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/private_key.go
create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key.go
create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go
create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/reader.go
create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature.go
create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go
create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go
create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go
create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userattribute.go
create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userid.go
create mode 100644 vendor/golang.org/x/crypto/openpgp/read.go
create mode 100644 vendor/golang.org/x/crypto/openpgp/s2k/s2k.go
create mode 100644 vendor/golang.org/x/crypto/openpgp/write.go
create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go
create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go
create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go
create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s
create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go
create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go
create mode 100644 vendor/golang.org/x/crypto/ssh/buffer.go
create mode 100644 vendor/golang.org/x/crypto/ssh/certs.go
create mode 100644 vendor/golang.org/x/crypto/ssh/channel.go
create mode 100644 vendor/golang.org/x/crypto/ssh/cipher.go
create mode 100644 vendor/golang.org/x/crypto/ssh/client.go
create mode 100644 vendor/golang.org/x/crypto/ssh/client_auth.go
create mode 100644 vendor/golang.org/x/crypto/ssh/common.go
create mode 100644 vendor/golang.org/x/crypto/ssh/connection.go
create mode 100644 vendor/golang.org/x/crypto/ssh/doc.go
create mode 100644 vendor/golang.org/x/crypto/ssh/handshake.go
create mode 100644 vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go
create mode 100644 vendor/golang.org/x/crypto/ssh/kex.go
create mode 100644 vendor/golang.org/x/crypto/ssh/keys.go
create mode 100644 vendor/golang.org/x/crypto/ssh/mac.go
create mode 100644 vendor/golang.org/x/crypto/ssh/messages.go
create mode 100644 vendor/golang.org/x/crypto/ssh/mux.go
create mode 100644 vendor/golang.org/x/crypto/ssh/server.go
create mode 100644 vendor/golang.org/x/crypto/ssh/session.go
create mode 100644 vendor/golang.org/x/crypto/ssh/ssh_gss.go
create mode 100644 vendor/golang.org/x/crypto/ssh/streamlocal.go
create mode 100644 vendor/golang.org/x/crypto/ssh/tcpip.go
create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/terminal.go
create mode 100644 vendor/golang.org/x/crypto/ssh/transport.go
create mode 100644 vendor/golang.org/x/net/AUTHORS
create mode 100644 vendor/golang.org/x/net/CONTRIBUTORS
create mode 100644 vendor/golang.org/x/net/LICENSE
create mode 100644 vendor/golang.org/x/net/PATENTS
create mode 100644 vendor/golang.org/x/net/context/context.go
create mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
create mode 100644 vendor/golang.org/x/net/context/go17.go
create mode 100644 vendor/golang.org/x/net/context/go19.go
create mode 100644 vendor/golang.org/x/net/context/pre_go17.go
create mode 100644 vendor/golang.org/x/net/context/pre_go19.go
create mode 100644 vendor/golang.org/x/net/html/atom/atom.go
create mode 100644 vendor/golang.org/x/net/html/atom/table.go
create mode 100644 vendor/golang.org/x/net/html/const.go
create mode 100644 vendor/golang.org/x/net/html/doc.go
create mode 100644 vendor/golang.org/x/net/html/doctype.go
create mode 100644 vendor/golang.org/x/net/html/entity.go
create mode 100644 vendor/golang.org/x/net/html/escape.go
create mode 100644 vendor/golang.org/x/net/html/foreign.go
create mode 100644 vendor/golang.org/x/net/html/node.go
create mode 100644 vendor/golang.org/x/net/html/parse.go
create mode 100644 vendor/golang.org/x/net/html/render.go
create mode 100644 vendor/golang.org/x/net/html/token.go
create mode 100644 vendor/golang.org/x/net/publicsuffix/list.go
create mode 100644 vendor/golang.org/x/net/publicsuffix/table.go
create mode 100644 vendor/golang.org/x/oauth2/.travis.yml
create mode 100644 vendor/golang.org/x/oauth2/AUTHORS
create mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTING.md
create mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTORS
create mode 100644 vendor/golang.org/x/oauth2/LICENSE
create mode 100644 vendor/golang.org/x/oauth2/README.md
create mode 100644 vendor/golang.org/x/oauth2/internal/client_appengine.go
create mode 100644 vendor/golang.org/x/oauth2/internal/doc.go
create mode 100644 vendor/golang.org/x/oauth2/internal/oauth2.go
create mode 100644 vendor/golang.org/x/oauth2/internal/token.go
create mode 100644 vendor/golang.org/x/oauth2/internal/transport.go
create mode 100644 vendor/golang.org/x/oauth2/oauth2.go
create mode 100644 vendor/golang.org/x/oauth2/token.go
create mode 100644 vendor/golang.org/x/oauth2/transport.go
create mode 100644 vendor/golang.org/x/sys/AUTHORS
create mode 100644 vendor/golang.org/x/sys/CONTRIBUTORS
create mode 100644 vendor/golang.org/x/sys/LICENSE
create mode 100644 vendor/golang.org/x/sys/PATENTS
create mode 100644 vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s
create mode 100644 vendor/golang.org/x/sys/cpu/byteorder.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_aix.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm64.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm64.s
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_x86.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_arm.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_mips64x.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_mipsx.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_arm.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_arm64.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_ppc64x.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_riscv64.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_s390x.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_s390x.s
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_wasm.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.s
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_zos.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go
create mode 100644 vendor/golang.org/x/sys/cpu/hwcap_linux.go
create mode 100644 vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go
create mode 100644 vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go
create mode 100644 vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go
create mode 100644 vendor/golang.org/x/sys/plan9/asm.s
create mode 100644 vendor/golang.org/x/sys/plan9/asm_plan9_386.s
create mode 100644 vendor/golang.org/x/sys/plan9/asm_plan9_amd64.s
create mode 100644 vendor/golang.org/x/sys/plan9/asm_plan9_arm.s
create mode 100644 vendor/golang.org/x/sys/plan9/const_plan9.go
create mode 100644 vendor/golang.org/x/sys/plan9/dir_plan9.go
create mode 100644 vendor/golang.org/x/sys/plan9/env_plan9.go
create mode 100644 vendor/golang.org/x/sys/plan9/errors_plan9.go
create mode 100644 vendor/golang.org/x/sys/plan9/mkall.sh
create mode 100644 vendor/golang.org/x/sys/plan9/mkerrors.sh
create mode 100644 vendor/golang.org/x/sys/plan9/mksysnum_plan9.sh
create mode 100644 vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go
create mode 100644 vendor/golang.org/x/sys/plan9/pwd_plan9.go
create mode 100644 vendor/golang.org/x/sys/plan9/race.go
create mode 100644 vendor/golang.org/x/sys/plan9/race0.go
create mode 100644 vendor/golang.org/x/sys/plan9/str.go
create mode 100644 vendor/golang.org/x/sys/plan9/syscall.go
create mode 100644 vendor/golang.org/x/sys/plan9/syscall_plan9.go
create mode 100644 vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go
create mode 100644 vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go
create mode 100644 vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go
create mode 100644 vendor/golang.org/x/sys/plan9/zsysnum_plan9.go
create mode 100644 vendor/golang.org/x/sys/unix/.gitignore
create mode 100644 vendor/golang.org/x/sys/unix/README.md
create mode 100644 vendor/golang.org/x/sys/unix/affinity_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/aliases.go
create mode 100644 vendor/golang.org/x/sys/unix/asm_aix_ppc64.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_bsd_386.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_bsd_amd64.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_bsd_arm.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_bsd_arm64.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_386.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_amd64.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_arm.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_arm64.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_mips64x.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_mipsx.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_riscv64.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_s390x.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_solaris_amd64.s
create mode 100644 vendor/golang.org/x/sys/unix/asm_zos_s390x.s
create mode 100644 vendor/golang.org/x/sys/unix/bluetooth_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/cap_freebsd.go
create mode 100644 vendor/golang.org/x/sys/unix/constants.go
create mode 100644 vendor/golang.org/x/sys/unix/dev_aix_ppc.go
create mode 100644 vendor/golang.org/x/sys/unix/dev_aix_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/dev_darwin.go
create mode 100644 vendor/golang.org/x/sys/unix/dev_dragonfly.go
create mode 100644 vendor/golang.org/x/sys/unix/dev_freebsd.go
create mode 100644 vendor/golang.org/x/sys/unix/dev_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/dev_netbsd.go
create mode 100644 vendor/golang.org/x/sys/unix/dev_openbsd.go
create mode 100644 vendor/golang.org/x/sys/unix/dev_zos.go
create mode 100644 vendor/golang.org/x/sys/unix/dirent.go
create mode 100644 vendor/golang.org/x/sys/unix/endian_big.go
create mode 100644 vendor/golang.org/x/sys/unix/endian_little.go
create mode 100644 vendor/golang.org/x/sys/unix/env_unix.go
create mode 100644 vendor/golang.org/x/sys/unix/epoll_zos.go
create mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/fcntl.go
create mode 100644 vendor/golang.org/x/sys/unix/fcntl_darwin.go
create mode 100644 vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go
create mode 100644 vendor/golang.org/x/sys/unix/fdset.go
create mode 100644 vendor/golang.org/x/sys/unix/fstatfs_zos.go
create mode 100644 vendor/golang.org/x/sys/unix/gccgo.go
create mode 100644 vendor/golang.org/x/sys/unix/gccgo_c.c
create mode 100644 vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/ifreq_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/ioctl.go
create mode 100644 vendor/golang.org/x/sys/unix/ioctl_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/ioctl_zos.go
create mode 100644 vendor/golang.org/x/sys/unix/mkall.sh
create mode 100644 vendor/golang.org/x/sys/unix/mkerrors.sh
create mode 100644 vendor/golang.org/x/sys/unix/pagesize_unix.go
create mode 100644 vendor/golang.org/x/sys/unix/pledge_openbsd.go
create mode 100644 vendor/golang.org/x/sys/unix/ptrace_darwin.go
create mode 100644 vendor/golang.org/x/sys/unix/ptrace_ios.go
create mode 100644 vendor/golang.org/x/sys/unix/race.go
create mode 100644 vendor/golang.org/x/sys/unix/race0.go
create mode 100644 vendor/golang.org/x/sys/unix/readdirent_getdents.go
create mode 100644 vendor/golang.org/x/sys/unix/readdirent_getdirentries.go
create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_dragonfly.go
create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_unix.go
create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go
create mode 100644 vendor/golang.org/x/sys/unix/str.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_aix.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_aix_ppc.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_bsd.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_dragonfly.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_illumos.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_386.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_alarm.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gc.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_ppc.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_s390x.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_solaris.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_unix.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_unix_gc.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_zos_s390x.go
create mode 100644 vendor/golang.org/x/sys/unix/sysvshm_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/sysvshm_unix.go
create mode 100644 vendor/golang.org/x/sys/unix/sysvshm_unix_other.go
create mode 100644 vendor/golang.org/x/sys/unix/timestruct.go
create mode 100644 vendor/golang.org/x/sys/unix/unveil_openbsd.go
create mode 100644 vendor/golang.org/x/sys/unix/xattr_bsd.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go
create mode 100644 vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/zptrace_linux_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/zptrace_x86_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_386.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go
create mode 100644 vendor/golang.org/x/sys/windows/aliases.go
create mode 100644 vendor/golang.org/x/sys/windows/dll_windows.go
create mode 100644 vendor/golang.org/x/sys/windows/empty.s
create mode 100644 vendor/golang.org/x/sys/windows/env_windows.go
create mode 100644 vendor/golang.org/x/sys/windows/eventlog.go
create mode 100644 vendor/golang.org/x/sys/windows/exec_windows.go
create mode 100644 vendor/golang.org/x/sys/windows/memory_windows.go
create mode 100644 vendor/golang.org/x/sys/windows/mkerrors.bash
create mode 100644 vendor/golang.org/x/sys/windows/mkknownfolderids.bash
create mode 100644 vendor/golang.org/x/sys/windows/mksyscall.go
create mode 100644 vendor/golang.org/x/sys/windows/race.go
create mode 100644 vendor/golang.org/x/sys/windows/race0.go
create mode 100644 vendor/golang.org/x/sys/windows/security_windows.go
create mode 100644 vendor/golang.org/x/sys/windows/service.go
create mode 100644 vendor/golang.org/x/sys/windows/setupapi_windows.go
create mode 100644 vendor/golang.org/x/sys/windows/str.go
create mode 100644 vendor/golang.org/x/sys/windows/syscall.go
create mode 100644 vendor/golang.org/x/sys/windows/syscall_windows.go
create mode 100644 vendor/golang.org/x/sys/windows/types_windows.go
create mode 100644 vendor/golang.org/x/sys/windows/types_windows_386.go
create mode 100644 vendor/golang.org/x/sys/windows/types_windows_amd64.go
create mode 100644 vendor/golang.org/x/sys/windows/types_windows_arm.go
create mode 100644 vendor/golang.org/x/sys/windows/types_windows_arm64.go
create mode 100644 vendor/golang.org/x/sys/windows/zerrors_windows.go
create mode 100644 vendor/golang.org/x/sys/windows/zknownfolderids_windows.go
create mode 100644 vendor/golang.org/x/sys/windows/zsyscall_windows.go
create mode 100644 vendor/golang.org/x/term/AUTHORS
create mode 100644 vendor/golang.org/x/term/CONTRIBUTING.md
create mode 100644 vendor/golang.org/x/term/CONTRIBUTORS
create mode 100644 vendor/golang.org/x/term/LICENSE
create mode 100644 vendor/golang.org/x/term/PATENTS
create mode 100644 vendor/golang.org/x/term/README.md
create mode 100644 vendor/golang.org/x/term/codereview.cfg
create mode 100644 vendor/golang.org/x/term/term.go
create mode 100644 vendor/golang.org/x/term/term_plan9.go
create mode 100644 vendor/golang.org/x/term/term_unix.go
create mode 100644 vendor/golang.org/x/term/term_unix_bsd.go
create mode 100644 vendor/golang.org/x/term/term_unix_other.go
create mode 100644 vendor/golang.org/x/term/term_unsupported.go
create mode 100644 vendor/golang.org/x/term/term_windows.go
create mode 100644 vendor/golang.org/x/term/terminal.go
create mode 100644 vendor/google.golang.org/appengine/LICENSE
create mode 100644 vendor/google.golang.org/appengine/internal/api.go
create mode 100644 vendor/google.golang.org/appengine/internal/api_classic.go
create mode 100644 vendor/google.golang.org/appengine/internal/api_common.go
create mode 100644 vendor/google.golang.org/appengine/internal/app_id.go
create mode 100644 vendor/google.golang.org/appengine/internal/base/api_base.pb.go
create mode 100644 vendor/google.golang.org/appengine/internal/base/api_base.proto
create mode 100644 vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
create mode 100644 vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
create mode 100644 vendor/google.golang.org/appengine/internal/identity.go
create mode 100644 vendor/google.golang.org/appengine/internal/identity_classic.go
create mode 100644 vendor/google.golang.org/appengine/internal/identity_flex.go
create mode 100644 vendor/google.golang.org/appengine/internal/identity_vm.go
create mode 100644 vendor/google.golang.org/appengine/internal/internal.go
create mode 100644 vendor/google.golang.org/appengine/internal/log/log_service.pb.go
create mode 100644 vendor/google.golang.org/appengine/internal/log/log_service.proto
create mode 100644 vendor/google.golang.org/appengine/internal/main.go
create mode 100644 vendor/google.golang.org/appengine/internal/main_common.go
create mode 100644 vendor/google.golang.org/appengine/internal/main_vm.go
create mode 100644 vendor/google.golang.org/appengine/internal/metadata.go
create mode 100644 vendor/google.golang.org/appengine/internal/net.go
create mode 100644 vendor/google.golang.org/appengine/internal/regen.sh
create mode 100644 vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
create mode 100644 vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
create mode 100644 vendor/google.golang.org/appengine/internal/transaction.go
create mode 100644 vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
create mode 100644 vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
create mode 100644 vendor/google.golang.org/appengine/urlfetch/urlfetch.go
create mode 100644 vendor/google.golang.org/protobuf/AUTHORS
create mode 100644 vendor/google.golang.org/protobuf/CONTRIBUTORS
create mode 100644 vendor/google.golang.org/protobuf/LICENSE
create mode 100644 vendor/google.golang.org/protobuf/PATENTS
create mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/decode.go
create mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/doc.go
create mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/encode.go
create mode 100644 vendor/google.golang.org/protobuf/encoding/protowire/wire.go
create mode 100644 vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
create mode 100644 vendor/google.golang.org/protobuf/internal/descopts/options.go
create mode 100644 vendor/google.golang.org/protobuf/internal/detrand/rand.go
create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/defval/default.go
create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go
create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go
create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go
create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go
create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/doc.go
create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/encode.go
create mode 100644 vendor/google.golang.org/protobuf/internal/errors/errors.go
create mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go112.go
create mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go113.go
create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/build.go
create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc.go
create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go
create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go
create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go
create mode 100644 vendor/google.golang.org/protobuf/internal/filetype/build.go
create mode 100644 vendor/google.golang.org/protobuf/internal/flags/flags.go
create mode 100644 vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go
create mode 100644 vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go
create mode 100644 vendor/google.golang.org/protobuf/internal/genid/any_gen.go
create mode 100644 vendor/google.golang.org/protobuf/internal/genid/api_gen.go
create mode 100644 vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
create mode 100644 vendor/google.golang.org/protobuf/internal/genid/doc.go
create mode 100644 vendor/google.golang.org/protobuf/internal/genid/duration_gen.go
create mode 100644 vendor/google.golang.org/protobuf/internal/genid/empty_gen.go
create mode 100644 vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go
create mode 100644 vendor/google.golang.org/protobuf/internal/genid/goname.go
create mode 100644 vendor/google.golang.org/protobuf/internal/genid/map_entry.go
create mode 100644 vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go
create mode 100644 vendor/google.golang.org/protobuf/internal/genid/struct_gen.go
create mode 100644 vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go
create mode 100644 vendor/google.golang.org/protobuf/internal/genid/type_gen.go
create mode 100644 vendor/google.golang.org/protobuf/internal/genid/wrappers.go
create mode 100644 vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/api_export.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/checkinit.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_field.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_gen.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_message.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_tables.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert_list.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert_map.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/decode.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/encode.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/enum.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/extension.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_export.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_file.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/merge.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/merge_gen.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/validate.go
create mode 100644 vendor/google.golang.org/protobuf/internal/impl/weak.go
create mode 100644 vendor/google.golang.org/protobuf/internal/order/order.go
create mode 100644 vendor/google.golang.org/protobuf/internal/order/range.go
create mode 100644 vendor/google.golang.org/protobuf/internal/pragma/pragma.go
create mode 100644 vendor/google.golang.org/protobuf/internal/set/ints.go
create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings.go
create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_pure.go
create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
create mode 100644 vendor/google.golang.org/protobuf/internal/version/version.go
create mode 100644 vendor/google.golang.org/protobuf/proto/checkinit.go
create mode 100644 vendor/google.golang.org/protobuf/proto/decode.go
create mode 100644 vendor/google.golang.org/protobuf/proto/decode_gen.go
create mode 100644 vendor/google.golang.org/protobuf/proto/doc.go
create mode 100644 vendor/google.golang.org/protobuf/proto/encode.go
create mode 100644 vendor/google.golang.org/protobuf/proto/encode_gen.go
create mode 100644 vendor/google.golang.org/protobuf/proto/equal.go
create mode 100644 vendor/google.golang.org/protobuf/proto/extension.go
create mode 100644 vendor/google.golang.org/protobuf/proto/merge.go
create mode 100644 vendor/google.golang.org/protobuf/proto/messageset.go
create mode 100644 vendor/google.golang.org/protobuf/proto/proto.go
create mode 100644 vendor/google.golang.org/protobuf/proto/proto_methods.go
create mode 100644 vendor/google.golang.org/protobuf/proto/proto_reflect.go
create mode 100644 vendor/google.golang.org/protobuf/proto/reset.go
create mode 100644 vendor/google.golang.org/protobuf/proto/size.go
create mode 100644 vendor/google.golang.org/protobuf/proto/size_gen.go
create mode 100644 vendor/google.golang.org/protobuf/proto/wrappers.go
create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go
create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go
create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/source.go
create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
create mode 100644 vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
create mode 100644 vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go
create mode 100644 vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
create mode 100644 vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go
create mode 100644 vendor/google.golang.org/protobuf/runtime/protoimpl/version.go
create mode 100644 vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
create mode 100644 vendor/gopkg.in/errgo.v1/LICENSE
create mode 100644 vendor/gopkg.in/errgo.v1/README.md
create mode 100644 vendor/gopkg.in/errgo.v1/errors.go
create mode 100644 vendor/gopkg.in/httprequest.v1/.travis.yml
create mode 100644 vendor/gopkg.in/httprequest.v1/LICENSE
create mode 100644 vendor/gopkg.in/httprequest.v1/README.md
create mode 100644 vendor/gopkg.in/httprequest.v1/client.go
create mode 100644 vendor/gopkg.in/httprequest.v1/error.go
create mode 100644 vendor/gopkg.in/httprequest.v1/fancyerror.go
create mode 100644 vendor/gopkg.in/httprequest.v1/handler.go
create mode 100644 vendor/gopkg.in/httprequest.v1/marshal.go
create mode 100644 vendor/gopkg.in/httprequest.v1/type.go
create mode 100644 vendor/gopkg.in/httprequest.v1/unmarshal.go
create mode 100644 vendor/gopkg.in/macaroon-bakery.v2/LICENSE
create mode 100644 vendor/gopkg.in/macaroon-bakery.v2/bakery/bakery.go
create mode 100644 vendor/gopkg.in/macaroon-bakery.v2/bakery/checker.go
create mode 100644 vendor/gopkg.in/macaroon-bakery.v2/bakery/checkers/checkers.go
create mode 100644 vendor/gopkg.in/macaroon-bakery.v2/bakery/checkers/declared.go
create mode 100644 vendor/gopkg.in/macaroon-bakery.v2/bakery/checkers/namespace.go
create mode 100644 vendor/gopkg.in/macaroon-bakery.v2/bakery/checkers/time.go
create mode 100644 vendor/gopkg.in/macaroon-bakery.v2/bakery/codec.go
create mode 100644 vendor/gopkg.in/macaroon-bakery.v2/bakery/discharge.go
create mode 100644 vendor/gopkg.in/macaroon-bakery.v2/bakery/dischargeall.go
create mode 100644 vendor/gopkg.in/macaroon-bakery.v2/bakery/doc.go
create mode 100644 vendor/gopkg.in/macaroon-bakery.v2/bakery/error.go
create mode 100644 vendor/gopkg.in/macaroon-bakery.v2/bakery/keys.go
create mode 100644 vendor/gopkg.in/macaroon-bakery.v2/bakery/logger.go
create mode 100644 vendor/gopkg.in/macaroon-bakery.v2/bakery/macaroon.go
create mode 100644 vendor/gopkg.in/macaroon-bakery.v2/bakery/oven.go
create mode 100644 vendor/gopkg.in/macaroon-bakery.v2/bakery/slice.go
create mode 100644 vendor/gopkg.in/macaroon-bakery.v2/bakery/store.go
create mode 100644 vendor/gopkg.in/macaroon-bakery.v2/bakery/version.go
create mode 100644 vendor/gopkg.in/macaroon-bakery.v2/httpbakery/browser.go
create mode 100644 vendor/gopkg.in/macaroon-bakery.v2/httpbakery/checkers.go
create mode 100644 vendor/gopkg.in/macaroon-bakery.v2/httpbakery/client.go
create mode 100644 vendor/gopkg.in/macaroon-bakery.v2/httpbakery/context_go17.go
create mode 100644 vendor/gopkg.in/macaroon-bakery.v2/httpbakery/context_prego17.go
create mode 100644 vendor/gopkg.in/macaroon-bakery.v2/httpbakery/discharge.go
create mode 100644 vendor/gopkg.in/macaroon-bakery.v2/httpbakery/dischargeclient_generated.go
create mode 100644 vendor/gopkg.in/macaroon-bakery.v2/httpbakery/error.go
create mode 100644 vendor/gopkg.in/macaroon-bakery.v2/httpbakery/keyring.go
create mode 100644 vendor/gopkg.in/macaroon-bakery.v2/httpbakery/oven.go
create mode 100644 vendor/gopkg.in/macaroon-bakery.v2/httpbakery/request.go
create mode 100644 vendor/gopkg.in/macaroon-bakery.v2/httpbakery/visitor.go
create mode 100644 vendor/gopkg.in/macaroon-bakery.v2/internal/httputil/relativeurl.go
create mode 100644 vendor/gopkg.in/macaroon.v2/.gitignore
create mode 100644 vendor/gopkg.in/macaroon.v2/.travis.yml
create mode 100644 vendor/gopkg.in/macaroon.v2/LICENSE
create mode 100644 vendor/gopkg.in/macaroon.v2/README.md
create mode 100644 vendor/gopkg.in/macaroon.v2/TODO
create mode 100644 vendor/gopkg.in/macaroon.v2/crypto.go
create mode 100644 vendor/gopkg.in/macaroon.v2/dependencies.tsv
create mode 100644 vendor/gopkg.in/macaroon.v2/macaroon.go
create mode 100644 vendor/gopkg.in/macaroon.v2/marshal-v1.go
create mode 100644 vendor/gopkg.in/macaroon.v2/marshal-v2.go
create mode 100644 vendor/gopkg.in/macaroon.v2/marshal.go
create mode 100644 vendor/gopkg.in/macaroon.v2/packet-v1.go
create mode 100644 vendor/gopkg.in/macaroon.v2/packet-v2.go
create mode 100644 vendor/gopkg.in/macaroon.v2/trace.go
create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/.gitignore
create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml
create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/LICENSE
create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/README.md
create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/chown.go
create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go
create mode 100644 vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go
create mode 100644 vendor/gopkg.in/yaml.v3/LICENSE
create mode 100644 vendor/gopkg.in/yaml.v3/NOTICE
create mode 100644 vendor/gopkg.in/yaml.v3/README.md
create mode 100644 vendor/gopkg.in/yaml.v3/apic.go
create mode 100644 vendor/gopkg.in/yaml.v3/decode.go
create mode 100644 vendor/gopkg.in/yaml.v3/emitterc.go
create mode 100644 vendor/gopkg.in/yaml.v3/encode.go
create mode 100644 vendor/gopkg.in/yaml.v3/parserc.go
create mode 100644 vendor/gopkg.in/yaml.v3/readerc.go
create mode 100644 vendor/gopkg.in/yaml.v3/resolve.go
create mode 100644 vendor/gopkg.in/yaml.v3/scannerc.go
create mode 100644 vendor/gopkg.in/yaml.v3/sorter.go
create mode 100644 vendor/gopkg.in/yaml.v3/writerc.go
create mode 100644 vendor/gopkg.in/yaml.v3/yaml.go
create mode 100644 vendor/gopkg.in/yaml.v3/yamlh.go
create mode 100644 vendor/gopkg.in/yaml.v3/yamlprivateh.go
create mode 100644 vendor/gorm.io/driver/mysql/License
create mode 100644 vendor/gorm.io/driver/mysql/README.md
create mode 100644 vendor/gorm.io/driver/mysql/migrator.go
create mode 100644 vendor/gorm.io/driver/mysql/mysql.go
create mode 100644 vendor/gorm.io/driver/sqlite/License
create mode 100644 vendor/gorm.io/driver/sqlite/README.md
create mode 100644 vendor/gorm.io/driver/sqlite/ddlmod.go
create mode 100644 vendor/gorm.io/driver/sqlite/errors.go
create mode 100644 vendor/gorm.io/driver/sqlite/migrator.go
create mode 100644 vendor/gorm.io/driver/sqlite/sqlite.go
create mode 100644 vendor/gorm.io/gorm/.gitignore
create mode 100644 vendor/gorm.io/gorm/.golangci.yml
create mode 100644 vendor/gorm.io/gorm/License
create mode 100644 vendor/gorm.io/gorm/README.md
create mode 100644 vendor/gorm.io/gorm/association.go
create mode 100644 vendor/gorm.io/gorm/callbacks.go
create mode 100644 vendor/gorm.io/gorm/callbacks/associations.go
create mode 100644 vendor/gorm.io/gorm/callbacks/callbacks.go
create mode 100644 vendor/gorm.io/gorm/callbacks/callmethod.go
create mode 100644 vendor/gorm.io/gorm/callbacks/create.go
create mode 100644 vendor/gorm.io/gorm/callbacks/delete.go
create mode 100644 vendor/gorm.io/gorm/callbacks/helper.go
create mode 100644 vendor/gorm.io/gorm/callbacks/interfaces.go
create mode 100644 vendor/gorm.io/gorm/callbacks/preload.go
create mode 100644 vendor/gorm.io/gorm/callbacks/query.go
create mode 100644 vendor/gorm.io/gorm/callbacks/raw.go
create mode 100644 vendor/gorm.io/gorm/callbacks/row.go
create mode 100644 vendor/gorm.io/gorm/callbacks/transaction.go
create mode 100644 vendor/gorm.io/gorm/callbacks/update.go
create mode 100644 vendor/gorm.io/gorm/chainable_api.go
create mode 100644 vendor/gorm.io/gorm/clause/clause.go
create mode 100644 vendor/gorm.io/gorm/clause/delete.go
create mode 100644 vendor/gorm.io/gorm/clause/expression.go
create mode 100644 vendor/gorm.io/gorm/clause/from.go
create mode 100644 vendor/gorm.io/gorm/clause/group_by.go
create mode 100644 vendor/gorm.io/gorm/clause/insert.go
create mode 100644 vendor/gorm.io/gorm/clause/joins.go
create mode 100644 vendor/gorm.io/gorm/clause/limit.go
create mode 100644 vendor/gorm.io/gorm/clause/locking.go
create mode 100644 vendor/gorm.io/gorm/clause/on_conflict.go
create mode 100644 vendor/gorm.io/gorm/clause/order_by.go
create mode 100644 vendor/gorm.io/gorm/clause/returning.go
create mode 100644 vendor/gorm.io/gorm/clause/select.go
create mode 100644 vendor/gorm.io/gorm/clause/set.go
create mode 100644 vendor/gorm.io/gorm/clause/update.go
create mode 100644 vendor/gorm.io/gorm/clause/values.go
create mode 100644 vendor/gorm.io/gorm/clause/where.go
create mode 100644 vendor/gorm.io/gorm/clause/with.go
create mode 100644 vendor/gorm.io/gorm/errors.go
create mode 100644 vendor/gorm.io/gorm/finisher_api.go
create mode 100644 vendor/gorm.io/gorm/gorm.go
create mode 100644 vendor/gorm.io/gorm/interfaces.go
create mode 100644 vendor/gorm.io/gorm/logger/logger.go
create mode 100644 vendor/gorm.io/gorm/logger/sql.go
create mode 100644 vendor/gorm.io/gorm/migrator.go
create mode 100644 vendor/gorm.io/gorm/migrator/column_type.go
create mode 100644 vendor/gorm.io/gorm/migrator/migrator.go
create mode 100644 vendor/gorm.io/gorm/model.go
create mode 100644 vendor/gorm.io/gorm/prepare_stmt.go
create mode 100644 vendor/gorm.io/gorm/scan.go
create mode 100644 vendor/gorm.io/gorm/schema/check.go
create mode 100644 vendor/gorm.io/gorm/schema/field.go
create mode 100644 vendor/gorm.io/gorm/schema/index.go
create mode 100644 vendor/gorm.io/gorm/schema/interfaces.go
create mode 100644 vendor/gorm.io/gorm/schema/naming.go
create mode 100644 vendor/gorm.io/gorm/schema/pool.go
create mode 100644 vendor/gorm.io/gorm/schema/relationship.go
create mode 100644 vendor/gorm.io/gorm/schema/schema.go
create mode 100644 vendor/gorm.io/gorm/schema/serializer.go
create mode 100644 vendor/gorm.io/gorm/schema/utils.go
create mode 100644 vendor/gorm.io/gorm/soft_delete.go
create mode 100644 vendor/gorm.io/gorm/statement.go
create mode 100644 vendor/gorm.io/gorm/utils/utils.go
create mode 100644 vendor/modules.txt
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 00000000..07fc0d72
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,11 @@
+FROM golang:alpine
+
+WORKDIR /root
+USER root
+
+RUN apk add musl-dev gcc libtool m4 autoconf g++ make libblkid util-linux-dev git linux-headers mingw-w64-gcc
+
+ADD ./scripts/build-static.sh /build-static.sh
+RUN chmod +x /build-static.sh
+
+CMD ["/bin/sh"]
\ No newline at end of file
diff --git a/Makefile b/Makefile
new file mode 100644
index 00000000..b62cc2ae
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,11 @@
+SHELL := bash
+
+.PHONY : build-static
+
+IMAGE_TAG = garm-build
+
+build-static:
+	@echo Building garm
+ docker build --tag $(IMAGE_TAG) .
+ docker run --rm -e USER_ID="$(shell id -u)" -e USER_GROUP="$(shell id -g)" -v $(PWD):/build/garm $(IMAGE_TAG) /build-static.sh
+ @echo Binaries are available in $(PWD)/bin
diff --git a/cmd/garm-cli/cmd/root.go b/cmd/garm-cli/cmd/root.go
index 7fb4f818..a8b24321 100644
--- a/cmd/garm-cli/cmd/root.go
+++ b/cmd/garm-cli/cmd/root.go
@@ -23,6 +23,8 @@ import (
"github.com/spf13/cobra"
)
+var Version string
+
var (
cfg *config.Config
mgr config.Manager
diff --git a/cmd/garm-cli/cmd/version.go b/cmd/garm-cli/cmd/version.go
new file mode 100644
index 00000000..99253aed
--- /dev/null
+++ b/cmd/garm-cli/cmd/version.go
@@ -0,0 +1,35 @@
+// Copyright 2022 Cloudbase Solutions SRL
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package cmd
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+)
+
+// versionCmd represents the version command
+var versionCmd = &cobra.Command{
+ Use: "version",
+ SilenceUsage: true,
+ Short: "Print version and exit",
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println(Version)
+ },
+}
+
+func init() {
+ rootCmd.AddCommand(versionCmd)
+}
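
Note: `Version` has no value in source; it is filled in at link time by the `-X garm/cmd/garm-cli/cmd.Version=...` flag that scripts/build-static.sh (further down) passes to `go build`. A minimal standalone sketch of that mechanism; the `main.version` variable name and the fallback default are illustrative assumptions, not part of this patch:

```go
// Illustrative sketch only (not a file added by this commit): how a
// -ldflags "-X ..." value replaces a package-level string at link time.
package main

import "fmt"

// version is empty in source; a build such as
//   go build -ldflags "-X main.version=$(git describe --always --dirty)" .
// fills it in, mirroring what build-static.sh does for
// garm/cmd/garm-cli/cmd.Version.
var version string

func main() {
	if version == "" {
		version = "devel" // hypothetical fallback when the flag is omitted
	}
	fmt.Println(version)
}
```

Built without the flag, the sketch prints the fallback; built with it, it prints the `git describe` output, which is what the new `garm-cli version` command relies on.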
diff --git a/runner/pool/common.go b/runner/pool/common.go
index 86571e9b..40c449fe 100644
--- a/runner/pool/common.go
+++ b/runner/pool/common.go
@@ -17,17 +17,18 @@ package pool
import (
"context"
"fmt"
+ "log"
+ "net/http"
+ "strings"
+ "sync"
+ "time"
+
"garm/auth"
dbCommon "garm/database/common"
runnerErrors "garm/errors"
"garm/params"
"garm/runner/common"
providerCommon "garm/runner/providers/common"
- "log"
- "net/http"
- "strings"
- "sync"
- "time"
"github.com/google/go-github/v43/github"
"github.com/google/uuid"
diff --git a/scripts/build-static.sh b/scripts/build-static.sh
new file mode 100644
index 00000000..00ce6302
--- /dev/null
+++ b/scripts/build-static.sh
@@ -0,0 +1,20 @@
+#!/bin/sh
+
+GARM_SOURCE="/build/garm"
+BIN_DIR="$GARM_SOURCE/bin"
+git config --global --add safe.directory "$GARM_SOURCE"
+
+[ ! -d "$BIN_DIR" ] && mkdir -p "$BIN_DIR"
+
+export CGO_ENABLED=1
+USER_ID=${USER_ID:-$UID}
+USER_GROUP=${USER_GROUP:-$(id -g)}
+
+cd $GARM_SOURCE/cmd/garm
+go build -mod vendor -o $BIN_DIR/garm -tags osusergo,netgo,sqlite_omit_load_extension -ldflags "-linkmode external -extldflags '-static' -s -w -X main.Version=$(git describe --always --dirty)" .
+
+cd $GARM_SOURCE/cmd/garm-cli
+go build -mod vendor -o $BIN_DIR/garm-cli -tags osusergo,netgo -ldflags "-linkmode external -extldflags '-static' -s -w -X garm/cmd/garm-cli/cmd.Version=$(git describe --always --dirty)" .
+GOOS=windows CGO_ENABLED=0 go build -mod vendor -o $BIN_DIR/garm-cli.exe -ldflags "-s -w -X garm/cmd/garm-cli/cmd.Version=$(git describe --always --dirty)" .
+
+chown $USER_ID:$USER_GROUP -R "$BIN_DIR"
\ No newline at end of file
diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore
new file mode 100644
index 00000000..cd11be96
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/.gitignore
@@ -0,0 +1,2 @@
+toml.test
+/toml-test
diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE
new file mode 100644
index 00000000..f621b011
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/COMPATIBLE
@@ -0,0 +1 @@
+Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING
new file mode 100644
index 00000000..01b57432
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/COPYING
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 TOML authors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md
new file mode 100644
index 00000000..64410cf7
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/README.md
@@ -0,0 +1,220 @@
+## TOML parser and encoder for Go with reflection
+
+TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
+reflection interface similar to Go's standard library `json` and `xml`
+packages. This package also supports the `encoding.TextUnmarshaler` and
+`encoding.TextMarshaler` interfaces so that you can define custom data
+representations. (There is an example of this below.)
+
+Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
+
+Documentation: https://godocs.io/github.com/BurntSushi/toml
+
+See the [releases page](https://github.com/BurntSushi/toml/releases) for a
+changelog; this information is also in the git tag annotations (e.g. `git show
+v0.4.0`).
+
+This library requires Go 1.13 or newer; install it with:
+
+ $ go get github.com/BurntSushi/toml
+
+It also comes with a TOML validator CLI tool:
+
+ $ go get github.com/BurntSushi/toml/cmd/tomlv
+ $ tomlv some-toml-file.toml
+
+### Testing
+
+This package passes all tests in
+[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
+and the encoder.
+
+### Examples
+
+This package works similarly to how the Go standard library handles XML and
+JSON. Namely, data is loaded into Go values via reflection.
+
+For the simplest example, consider some TOML file as just a list of keys
+and values:
+
+```toml
+Age = 25
+Cats = [ "Cauchy", "Plato" ]
+Pi = 3.14
+Perfection = [ 6, 28, 496, 8128 ]
+DOB = 1987-07-05T05:45:00Z
+```
+
+Which could be defined in Go as:
+
+```go
+type Config struct {
+ Age int
+ Cats []string
+ Pi float64
+ Perfection []int
+ DOB time.Time // requires `import time`
+}
+```
+
+And then decoded with:
+
+```go
+var conf Config
+if _, err := toml.Decode(tomlData, &conf); err != nil {
+ // handle error
+}
+```
+
+You can also use struct tags if your struct field name doesn't map to a TOML
+key value directly:
+
+```toml
+some_key_NAME = "wat"
+```
+
+```go
+type TOML struct {
+ ObscureKey string `toml:"some_key_NAME"`
+}
+```
+
+Beware that, like most other decoders, **only exported fields** are
+considered when encoding and decoding; private fields are silently ignored.
+
+### Using the `encoding.TextUnmarshaler` interface
+
+Here's an example that automatically parses duration strings into
+`time.Duration` values:
+
+```toml
+[[song]]
+name = "Thunder Road"
+duration = "4m49s"
+
+[[song]]
+name = "Stairway to Heaven"
+duration = "8m03s"
+```
+
+Which can be decoded with:
+
+```go
+type song struct {
+ Name string
+ Duration duration
+}
+type songs struct {
+ Song []song
+}
+var favorites songs
+if _, err := toml.Decode(blob, &favorites); err != nil {
+ log.Fatal(err)
+}
+
+for _, s := range favorites.Song {
+ fmt.Printf("%s (%s)\n", s.Name, s.Duration)
+}
+```
+
+And you'll also need a `duration` type that satisfies the
+`encoding.TextUnmarshaler` interface:
+
+```go
+type duration struct {
+ time.Duration
+}
+
+func (d *duration) UnmarshalText(text []byte) error {
+ var err error
+ d.Duration, err = time.ParseDuration(string(text))
+ return err
+}
+```
+
+To target TOML specifically you can implement the `UnmarshalTOML` interface in
+a similar way.
+
+### More complex usage
+
+Here's an example of how to load the example from the official spec page:
+
+```toml
+# This is a TOML document. Boom.
+
+title = "TOML Example"
+
+[owner]
+name = "Tom Preston-Werner"
+organization = "GitHub"
+bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
+dob = 1979-05-27T07:32:00Z # First class dates? Why not?
+
+[database]
+server = "192.168.1.1"
+ports = [ 8001, 8001, 8002 ]
+connection_max = 5000
+enabled = true
+
+[servers]
+
+ # You can indent as you please. Tabs or spaces. TOML don't care.
+ [servers.alpha]
+ ip = "10.0.0.1"
+ dc = "eqdc10"
+
+ [servers.beta]
+ ip = "10.0.0.2"
+ dc = "eqdc10"
+
+[clients]
+data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
+
+# Line breaks are OK when inside arrays
+hosts = [
+ "alpha",
+ "omega"
+]
+```
+
+And the corresponding Go types are:
+
+```go
+type tomlConfig struct {
+ Title string
+ Owner ownerInfo
+ DB database `toml:"database"`
+ Servers map[string]server
+ Clients clients
+}
+
+type ownerInfo struct {
+ Name string
+ Org string `toml:"organization"`
+ Bio string
+ DOB time.Time
+}
+
+type database struct {
+ Server string
+ Ports []int
+ ConnMax int `toml:"connection_max"`
+ Enabled bool
+}
+
+type server struct {
+ IP string
+ DC string
+}
+
+type clients struct {
+ Data [][]interface{}
+ Hosts []string
+}
+```
+
+Note that a case insensitive match will be tried if an exact match can't be
+found.
+
+A working example of the above can be found in `_examples/example.{go,toml}`.
+
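
The README above points out that only exported struct fields are decoded and that a type can implement `UnmarshalTOML` (the `Unmarshaler` interface defined in decode.go below) to handle its own decoding. A self-contained sketch of both points; the `port` type, `config` struct and TOML snippet are illustrative assumptions, not code from the vendored package:

```go
// Illustrative sketch (not from the vendored package): decoding into a type
// that implements UnmarshalTOML, alongside the exported-fields rule.
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// port implements toml.Unmarshaler, so it receives the already-parsed value.
type port int

func (p *port) UnmarshalTOML(v interface{}) error {
	n, ok := v.(int64) // TOML integers arrive from the parser as int64
	if !ok {
		return fmt.Errorf("expected an integer, got %T", v)
	}
	*p = port(n)
	return nil
}

type config struct {
	Port   port   // exported: decoded, via UnmarshalTOML above
	secret string // unexported: silently ignored by the decoder
}

func main() {
	var cfg config
	if _, err := toml.Decode("Port = 8080\nsecret = \"ignored\"", &cfg); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Println(cfg.Port, cfg.secret) // 8080 and an empty string
}
```

Note the pointer receiver: the decoder looks the interface up via `rv.Addr()`, as can be seen in `unify` in decode.go below.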
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go
new file mode 100644
index 00000000..d3d3b839
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/decode.go
@@ -0,0 +1,511 @@
+package toml
+
+import (
+ "encoding"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "os"
+ "reflect"
+ "strings"
+ "time"
+)
+
+// Unmarshaler is the interface implemented by objects that can unmarshal a
+// TOML description of themselves.
+type Unmarshaler interface {
+ UnmarshalTOML(interface{}) error
+}
+
+// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
+func Unmarshal(p []byte, v interface{}) error {
+ _, err := Decode(string(p), v)
+ return err
+}
+
+// Primitive is a TOML value that hasn't been decoded into a Go value.
+//
+// This type can be used for any value, which will cause decoding to be delayed.
+// You can use the PrimitiveDecode() function to "manually" decode these values.
+//
+// NOTE: The underlying representation of a `Primitive` value is subject to
+// change. Do not rely on it.
+//
+// NOTE: Primitive values are still parsed, so using them will only avoid the
+// overhead of reflection. They can be useful when you don't know the exact type
+// of TOML data until runtime.
+type Primitive struct {
+ undecoded interface{}
+ context Key
+}
+
+// PrimitiveDecode is just like the other `Decode*` functions, except it
+// decodes a TOML value that has already been parsed. Valid primitive values
+// can *only* be obtained from values filled by the decoder functions,
+// including this method. (i.e., `v` may contain more `Primitive`
+// values.)
+//
+// Meta data for primitive values is included in the meta data returned by
+// the `Decode*` functions with one exception: keys returned by the Undecoded
+// method will only reflect keys that were decoded. Namely, any keys hidden
+// behind a Primitive will be considered undecoded. Executing this method will
+// update the undecoded keys in the meta data. (See the example.)
+func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
+ md.context = primValue.context
+ defer func() { md.context = nil }()
+ return md.unify(primValue.undecoded, rvalue(v))
+}
+
+// Decoder decodes TOML data.
+//
+// TOML tables correspond to Go structs or maps (dealer's choice – they can be
+// used interchangeably).
+//
+// TOML table arrays correspond to either a slice of structs or a slice of maps.
+//
+// TOML datetimes correspond to Go time.Time values. Local datetimes are parsed
+// in the local timezone.
+//
+// All other TOML types (float, string, int, bool and array) correspond to the
+// obvious Go types.
+//
+// An exception to the above rules is if a type implements the TextUnmarshaler
+// interface, in which case any primitive TOML value (floats, strings, integers,
+// booleans, datetimes) will be converted to a []byte and given to the value's
+// UnmarshalText method. See the Unmarshaler example for a demonstration with
+// time duration strings.
+//
+// Key mapping
+//
+// TOML keys can map to either keys in a Go map or field names in a Go struct.
+// The special `toml` struct tag can be used to map TOML keys to struct fields
+// that don't match the key name exactly (see the example). A case insensitive
+// match to struct names will be tried if an exact match can't be found.
+//
+// The mapping between TOML values and Go values is loose. That is, there may
+// exist TOML values that cannot be placed into your representation, and there
+// may be parts of your representation that do not correspond to TOML values.
+// This loose mapping can be made stricter by using the IsDefined and/or
+// Undecoded methods on the MetaData returned.
+//
+// This decoder does not handle cyclic types. Decode will not terminate if a
+// cyclic type is passed.
+type Decoder struct {
+ r io.Reader
+}
+
+// NewDecoder creates a new Decoder.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{r: r}
+}
+
+// Decode TOML data in to the pointer `v`.
+func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Ptr {
+ return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
+ }
+ if rv.IsNil() {
+ return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
+ }
+
+	// TODO: have the parser read from io.Reader? Or at the very least, make
+ // it read from []byte rather than string
+ data, err := ioutil.ReadAll(dec.r)
+ if err != nil {
+ return MetaData{}, err
+ }
+
+ p, err := parse(string(data))
+ if err != nil {
+ return MetaData{}, err
+ }
+ md := MetaData{
+ p.mapping, p.types, p.ordered,
+ make(map[string]bool, len(p.ordered)), nil,
+ }
+ return md, md.unify(p.mapping, indirect(rv))
+}
+
+// Decode the TOML data in to the pointer v.
+//
+// See the documentation on Decoder for a description of the decoding process.
+func Decode(data string, v interface{}) (MetaData, error) {
+ return NewDecoder(strings.NewReader(data)).Decode(v)
+}
+
+// DecodeFile is just like Decode, except it will automatically read the
+// contents of the file at path and decode it for you.
+func DecodeFile(path string, v interface{}) (MetaData, error) {
+ fp, err := os.Open(path)
+ if err != nil {
+ return MetaData{}, err
+ }
+ defer fp.Close()
+ return NewDecoder(fp).Decode(v)
+}
+
+// unify performs a sort of type unification based on the structure of `rv`,
+// which is the client representation.
+//
+// Any type mismatch produces an error. Finding a type that we don't know
+// how to handle produces an unsupported type error.
+func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
+ // Special case. Look for a `Primitive` value.
+ // TODO: #76 would make this superfluous after implemented.
+ if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
+ // Save the undecoded data and the key context into the primitive
+ // value.
+ context := make(Key, len(md.context))
+ copy(context, md.context)
+ rv.Set(reflect.ValueOf(Primitive{
+ undecoded: data,
+ context: context,
+ }))
+ return nil
+ }
+
+ // Special case. Unmarshaler Interface support.
+ if rv.CanAddr() {
+ if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
+ return v.UnmarshalTOML(data)
+ }
+ }
+
+ // Special case. Look for a value satisfying the TextUnmarshaler interface.
+ if v, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
+ return md.unifyText(data, v)
+ }
+ // TODO:
+ // The behavior here is incorrect whenever a Go type satisfies the
+ // encoding.TextUnmarshaler interface but also corresponds to a TOML hash or
+ // array. In particular, the unmarshaler should only be applied to primitive
+ // TOML values. But at this point, it will be applied to all kinds of values
+ // and produce an incorrect error whenever those values are hashes or arrays
+ // (including arrays of tables).
+
+ k := rv.Kind()
+
+ // laziness
+ if k >= reflect.Int && k <= reflect.Uint64 {
+ return md.unifyInt(data, rv)
+ }
+ switch k {
+ case reflect.Ptr:
+ elem := reflect.New(rv.Type().Elem())
+ err := md.unify(data, reflect.Indirect(elem))
+ if err != nil {
+ return err
+ }
+ rv.Set(elem)
+ return nil
+ case reflect.Struct:
+ return md.unifyStruct(data, rv)
+ case reflect.Map:
+ return md.unifyMap(data, rv)
+ case reflect.Array:
+ return md.unifyArray(data, rv)
+ case reflect.Slice:
+ return md.unifySlice(data, rv)
+ case reflect.String:
+ return md.unifyString(data, rv)
+ case reflect.Bool:
+ return md.unifyBool(data, rv)
+ case reflect.Interface:
+ // we only support empty interfaces.
+ if rv.NumMethod() > 0 {
+ return e("unsupported type %s", rv.Type())
+ }
+ return md.unifyAnything(data, rv)
+ case reflect.Float32:
+ fallthrough
+ case reflect.Float64:
+ return md.unifyFloat64(data, rv)
+ }
+ return e("unsupported type %s", rv.Kind())
+}
+
+func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
+ tmap, ok := mapping.(map[string]interface{})
+ if !ok {
+ if mapping == nil {
+ return nil
+ }
+ return e("type mismatch for %s: expected table but found %T",
+ rv.Type().String(), mapping)
+ }
+
+ for key, datum := range tmap {
+ var f *field
+ fields := cachedTypeFields(rv.Type())
+ for i := range fields {
+ ff := &fields[i]
+ if ff.name == key {
+ f = ff
+ break
+ }
+ if f == nil && strings.EqualFold(ff.name, key) {
+ f = ff
+ }
+ }
+ if f != nil {
+ subv := rv
+ for _, i := range f.index {
+ subv = indirect(subv.Field(i))
+ }
+ if isUnifiable(subv) {
+ md.decoded[md.context.add(key).String()] = true
+ md.context = append(md.context, key)
+ if err := md.unify(datum, subv); err != nil {
+ return err
+ }
+ md.context = md.context[0 : len(md.context)-1]
+ } else if f.name != "" {
+ // Bad user! No soup for you!
+ return e("cannot write unexported field %s.%s",
+ rv.Type().String(), f.name)
+ }
+ }
+ }
+ return nil
+}
+
+func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
+ if k := rv.Type().Key().Kind(); k != reflect.String {
+ return fmt.Errorf(
+ "toml: cannot decode to a map with non-string key type (%s in %q)",
+ k, rv.Type())
+ }
+
+ tmap, ok := mapping.(map[string]interface{})
+ if !ok {
+ if tmap == nil {
+ return nil
+ }
+ return badtype("map", mapping)
+ }
+ if rv.IsNil() {
+ rv.Set(reflect.MakeMap(rv.Type()))
+ }
+ for k, v := range tmap {
+ md.decoded[md.context.add(k).String()] = true
+ md.context = append(md.context, k)
+
+ rvkey := indirect(reflect.New(rv.Type().Key()))
+ rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
+ if err := md.unify(v, rvval); err != nil {
+ return err
+ }
+ md.context = md.context[0 : len(md.context)-1]
+
+ rvkey.SetString(k)
+ rv.SetMapIndex(rvkey, rvval)
+ }
+ return nil
+}
+
+func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
+ datav := reflect.ValueOf(data)
+ if datav.Kind() != reflect.Slice {
+ if !datav.IsValid() {
+ return nil
+ }
+ return badtype("slice", data)
+ }
+ if l := datav.Len(); l != rv.Len() {
+ return e("expected array length %d; got TOML array of length %d", rv.Len(), l)
+ }
+ return md.unifySliceArray(datav, rv)
+}
+
+func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
+ datav := reflect.ValueOf(data)
+ if datav.Kind() != reflect.Slice {
+ if !datav.IsValid() {
+ return nil
+ }
+ return badtype("slice", data)
+ }
+ n := datav.Len()
+ if rv.IsNil() || rv.Cap() < n {
+ rv.Set(reflect.MakeSlice(rv.Type(), n, n))
+ }
+ rv.SetLen(n)
+ return md.unifySliceArray(datav, rv)
+}
+
+func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
+ l := data.Len()
+ for i := 0; i < l; i++ {
+ err := md.unify(data.Index(i).Interface(), indirect(rv.Index(i)))
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
+ if _, ok := data.(time.Time); ok {
+ rv.Set(reflect.ValueOf(data))
+ return nil
+ }
+ return badtype("time.Time", data)
+}
+
+func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
+ if s, ok := data.(string); ok {
+ rv.SetString(s)
+ return nil
+ }
+ return badtype("string", data)
+}
+
+func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
+ if num, ok := data.(float64); ok {
+ switch rv.Kind() {
+ case reflect.Float32:
+ fallthrough
+ case reflect.Float64:
+ rv.SetFloat(num)
+ default:
+ panic("bug")
+ }
+ return nil
+ }
+ return badtype("float", data)
+}
+
+func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
+ if num, ok := data.(int64); ok {
+ if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
+ switch rv.Kind() {
+ case reflect.Int, reflect.Int64:
+ // No bounds checking necessary.
+ case reflect.Int8:
+ if num < math.MinInt8 || num > math.MaxInt8 {
+ return e("value %d is out of range for int8", num)
+ }
+ case reflect.Int16:
+ if num < math.MinInt16 || num > math.MaxInt16 {
+ return e("value %d is out of range for int16", num)
+ }
+ case reflect.Int32:
+ if num < math.MinInt32 || num > math.MaxInt32 {
+ return e("value %d is out of range for int32", num)
+ }
+ }
+ rv.SetInt(num)
+ } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
+ unum := uint64(num)
+ switch rv.Kind() {
+ case reflect.Uint, reflect.Uint64:
+ // No bounds checking necessary.
+ case reflect.Uint8:
+ if num < 0 || unum > math.MaxUint8 {
+ return e("value %d is out of range for uint8", num)
+ }
+ case reflect.Uint16:
+ if num < 0 || unum > math.MaxUint16 {
+ return e("value %d is out of range for uint16", num)
+ }
+ case reflect.Uint32:
+ if num < 0 || unum > math.MaxUint32 {
+ return e("value %d is out of range for uint32", num)
+ }
+ }
+ rv.SetUint(unum)
+ } else {
+ panic("unreachable")
+ }
+ return nil
+ }
+ return badtype("integer", data)
+}
+
+func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
+ if b, ok := data.(bool); ok {
+ rv.SetBool(b)
+ return nil
+ }
+ return badtype("boolean", data)
+}
+
+func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
+ rv.Set(reflect.ValueOf(data))
+ return nil
+}
+
+func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error {
+ var s string
+ switch sdata := data.(type) {
+ case TextMarshaler:
+ text, err := sdata.MarshalText()
+ if err != nil {
+ return err
+ }
+ s = string(text)
+ case fmt.Stringer:
+ s = sdata.String()
+ case string:
+ s = sdata
+ case bool:
+ s = fmt.Sprintf("%v", sdata)
+ case int64:
+ s = fmt.Sprintf("%d", sdata)
+ case float64:
+ s = fmt.Sprintf("%f", sdata)
+ default:
+ return badtype("primitive (string-like)", data)
+ }
+ if err := v.UnmarshalText([]byte(s)); err != nil {
+ return err
+ }
+ return nil
+}
+
+// rvalue returns a reflect.Value of `v`. All pointers are resolved.
+func rvalue(v interface{}) reflect.Value {
+ return indirect(reflect.ValueOf(v))
+}
+
+// indirect returns the value pointed to by a pointer.
+// Pointers are followed until the value is not a pointer.
+// New values are allocated for each nil pointer.
+//
+// An exception to this rule is if the value satisfies an interface of
+// interest to us (like encoding.TextUnmarshaler).
+func indirect(v reflect.Value) reflect.Value {
+ if v.Kind() != reflect.Ptr {
+ if v.CanSet() {
+ pv := v.Addr()
+ if _, ok := pv.Interface().(encoding.TextUnmarshaler); ok {
+ return pv
+ }
+ }
+ return v
+ }
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ return indirect(reflect.Indirect(v))
+}
+
+func isUnifiable(rv reflect.Value) bool {
+ if rv.CanSet() {
+ return true
+ }
+ if _, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
+ return true
+ }
+ return false
+}
+
+func e(format string, args ...interface{}) error {
+ return fmt.Errorf("toml: "+format, args...)
+}
+
+func badtype(expected string, data interface{}) error {
+ return e("cannot load TOML value of type %T into a Go %s", data, expected)
+}
diff --git a/vendor/github.com/BurntSushi/toml/decode_go116.go b/vendor/github.com/BurntSushi/toml/decode_go116.go
new file mode 100644
index 00000000..38aa75fd
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/decode_go116.go
@@ -0,0 +1,18 @@
+// +build go1.16
+
+package toml
+
+import (
+ "io/fs"
+)
+
+// DecodeFS is just like Decode, except it will automatically read the contents
+// of the file at `path` from a fs.FS instance.
+func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) {
+ fp, err := fsys.Open(path)
+ if err != nil {
+ return MetaData{}, err
+ }
+ defer fp.Close()
+ return NewDecoder(fp).Decode(v)
+}
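+
+// A minimal usage sketch from client code on Go 1.16+, using an embedded file
+// (the file name and target type are hypothetical):
+//
+//	//go:embed config.toml
+//	var configFS embed.FS
+//
+//	var conf map[string]interface{}
+//	md, err := toml.DecodeFS(configFS, "config.toml", &conf)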
diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/decode_meta.go
new file mode 100644
index 00000000..ad8899c6
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/decode_meta.go
@@ -0,0 +1,123 @@
+package toml
+
+import "strings"
+
+// MetaData allows access to meta information about TOML data that may not be
+// inferable via reflection. In particular, whether a key has been defined and
+// the TOML type of a key.
+type MetaData struct {
+ mapping map[string]interface{}
+ types map[string]tomlType
+ keys []Key
+ decoded map[string]bool
+ context Key // Used only during decoding.
+}
+
+// IsDefined reports if the key exists in the TOML data.
+//
+// The key should be specified hierarchically, for example to access the TOML
+// key "a.b.c" you would use:
+//
+// IsDefined("a", "b", "c")
+//
+// IsDefined will return false if an empty key is given. Keys are case sensitive.
+func (md *MetaData) IsDefined(key ...string) bool {
+ if len(key) == 0 {
+ return false
+ }
+
+ var hash map[string]interface{}
+ var ok bool
+ var hashOrVal interface{} = md.mapping
+ for _, k := range key {
+ if hash, ok = hashOrVal.(map[string]interface{}); !ok {
+ return false
+ }
+ if hashOrVal, ok = hash[k]; !ok {
+ return false
+ }
+ }
+ return true
+}
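+
+// For example, given a document containing only:
+//
+//	[server]
+//	host = "localhost"
+//
+// the following calls behave as shown (the key names are illustrative):
+//
+//	md.IsDefined("server", "host") // true
+//	md.IsDefined("server", "port") // false
+//	md.IsDefined()                 // false (empty key)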
+
+// Type returns a string representation of the type of the key specified.
+//
+// Type will return the empty string if given an empty key or a key that does
+// not exist. Keys are case sensitive.
+func (md *MetaData) Type(key ...string) string {
+ fullkey := strings.Join(key, ".")
+ if typ, ok := md.types[fullkey]; ok {
+ return typ.typeString()
+ }
+ return ""
+}
+
+// Key represents any TOML key, including key groups. Use (MetaData).Keys to get
+// values of this type.
+type Key []string
+
+func (k Key) String() string { return strings.Join(k, ".") }
+
+func (k Key) maybeQuotedAll() string {
+ var ss []string
+ for i := range k {
+ ss = append(ss, k.maybeQuoted(i))
+ }
+ return strings.Join(ss, ".")
+}
+
+func (k Key) maybeQuoted(i int) string {
+ if k[i] == "" {
+ return `""`
+ }
+ quote := false
+ for _, c := range k[i] {
+ if !isBareKeyChar(c) {
+ quote = true
+ break
+ }
+ }
+ if quote {
+ return `"` + quotedReplacer.Replace(k[i]) + `"`
+ }
+ return k[i]
+}
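+
+// As an illustration, parts that are not bare keys are quoted while bare parts
+// are left as-is:
+//
+//	Key{"a b", "c"}.maybeQuotedAll() // `"a b".c`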
+
+func (k Key) add(piece string) Key {
+ newKey := make(Key, len(k)+1)
+ copy(newKey, k)
+ newKey[len(k)] = piece
+ return newKey
+}
+
+// Keys returns a slice of every key in the TOML data, including key groups.
+//
+// Each key is itself a slice, where the first element is the top of the
+// hierarchy and the last is the most specific. The list will have the same
+// order as the keys appeared in the TOML data.
+//
+// All keys returned are non-empty.
+func (md *MetaData) Keys() []Key {
+ return md.keys
+}
+
+// Undecoded returns all keys that have not been decoded in the order in which
+// they appear in the original TOML document.
+//
+// This includes keys that haven't been decoded because of a Primitive value.
+// Once the Primitive value is decoded, the keys will be considered decoded.
+//
+// Also note that decoding into an empty interface will result in no decoding,
+// and so no keys will be considered decoded.
+//
+// In this sense, the Undecoded keys correspond to keys in the TOML document
+// that do not have a concrete type in your representation.
+func (md *MetaData) Undecoded() []Key {
+ undecoded := make([]Key, 0, len(md.keys))
+ for _, key := range md.keys {
+ if !md.decoded[key.String()] {
+ undecoded = append(undecoded, key)
+ }
+ }
+ return undecoded
+}
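+
+// A short sketch of how Undecoded is commonly used to warn about unknown keys
+// (the struct and log message are hypothetical):
+//
+//	var cfg struct{ Name string }
+//	md, err := toml.Decode("name = \"x\"\nunknown = 1", &cfg)
+//	if err == nil {
+//		for _, k := range md.Undecoded() {
+//			log.Printf("unknown key: %s", k)
+//		}
+//	}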
diff --git a/vendor/github.com/BurntSushi/toml/deprecated.go b/vendor/github.com/BurntSushi/toml/deprecated.go
new file mode 100644
index 00000000..db89eac1
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/deprecated.go
@@ -0,0 +1,33 @@
+package toml
+
+import (
+ "encoding"
+ "io"
+)
+
+// DEPRECATED!
+//
+// Use the identical encoding.TextMarshaler instead. It is defined here to
+// support Go 1.1 and older.
+type TextMarshaler encoding.TextMarshaler
+
+// DEPRECATED!
+//
+// Use the identical encoding.TextUnmarshaler instead. It is defined here to
+// support Go 1.1 and older.
+type TextUnmarshaler encoding.TextUnmarshaler
+
+// DEPRECATED!
+//
+// Use MetaData.PrimitiveDecode instead.
+func PrimitiveDecode(primValue Primitive, v interface{}) error {
+ md := MetaData{decoded: make(map[string]bool)}
+ return md.unify(primValue.undecoded, rvalue(v))
+}
+
+// DEPRECATED!
+//
+// Use NewDecoder(reader).Decode(&v) instead.
+func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
+ return NewDecoder(r).Decode(v)
+}
diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go
new file mode 100644
index 00000000..099c4a77
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/doc.go
@@ -0,0 +1,13 @@
+/*
+Package toml implements decoding and encoding of TOML files.
+
+This package supports TOML v1.0.0, as listed on https://toml.io
+
+There is also support for delaying decoding with the Primitive type, and
+querying the set of keys in a TOML document with the MetaData type.
+
+The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator,
+and can be used to verify whether a TOML document is valid. It can also be used to
+print the type of each key.
+*/
+package toml
diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go
new file mode 100644
index 00000000..10d88ac6
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/encode.go
@@ -0,0 +1,650 @@
+package toml
+
+import (
+ "bufio"
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/BurntSushi/toml/internal"
+)
+
+type tomlEncodeError struct{ error }
+
+var (
+ errArrayNilElement = errors.New("toml: cannot encode array with nil element")
+ errNonString = errors.New("toml: cannot encode a map with non-string key type")
+ errAnonNonStruct = errors.New("toml: cannot encode an anonymous field that is not a struct")
+ errNoKey = errors.New("toml: top-level values must be Go maps or structs")
+ errAnything = errors.New("") // used in testing
+)
+
+var quotedReplacer = strings.NewReplacer(
+ "\"", "\\\"",
+ "\\", "\\\\",
+ "\x00", `\u0000`,
+ "\x01", `\u0001`,
+ "\x02", `\u0002`,
+ "\x03", `\u0003`,
+ "\x04", `\u0004`,
+ "\x05", `\u0005`,
+ "\x06", `\u0006`,
+ "\x07", `\u0007`,
+ "\b", `\b`,
+ "\t", `\t`,
+ "\n", `\n`,
+ "\x0b", `\u000b`,
+ "\f", `\f`,
+ "\r", `\r`,
+ "\x0e", `\u000e`,
+ "\x0f", `\u000f`,
+ "\x10", `\u0010`,
+ "\x11", `\u0011`,
+ "\x12", `\u0012`,
+ "\x13", `\u0013`,
+ "\x14", `\u0014`,
+ "\x15", `\u0015`,
+ "\x16", `\u0016`,
+ "\x17", `\u0017`,
+ "\x18", `\u0018`,
+ "\x19", `\u0019`,
+ "\x1a", `\u001a`,
+ "\x1b", `\u001b`,
+ "\x1c", `\u001c`,
+ "\x1d", `\u001d`,
+ "\x1e", `\u001e`,
+ "\x1f", `\u001f`,
+ "\x7f", `\u007f`,
+)
+
+// Encoder encodes a Go value to a TOML document.
+//
+// The mapping between Go values and TOML values should be precisely the same as
+// for the Decode* functions. Similarly, the TextMarshaler interface is
+// supported by encoding the resulting bytes as strings. If you want to write
+// arbitrary binary data then you will need to use something like base64 since
+// TOML does not have any binary types.
+//
+// When encoding TOML hashes (Go maps or structs), keys without any sub-hashes
+// are encoded first.
+//
+// Go maps will be sorted alphabetically by key for deterministic output.
+//
+// Encoding Go values without a corresponding TOML representation will return an
+// error. Examples of this include maps with non-string keys, slices with nil
+// elements, embedded non-struct types, and nested slices containing maps or
+// structs. (e.g. [][]map[string]string is not allowed but []map[string]string
+// is okay, as is []map[string][]string).
+//
+// NOTE: Only exported keys are encoded due to the use of reflection. Unexported
+// keys are silently discarded.
+type Encoder struct {
+ // The string to use for a single indentation level. The default is two
+ // spaces.
+ Indent string
+
+ // hasWritten is whether we have written any output to w yet.
+ hasWritten bool
+ w *bufio.Writer
+}
+
+// NewEncoder creates a new Encoder.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{
+ w: bufio.NewWriter(w),
+ Indent: " ",
+ }
+}
+
+// Encode writes a TOML representation of the Go value to the Encoder's writer.
+//
+// An error is returned if the value given cannot be encoded to a valid TOML
+// document.
+func (enc *Encoder) Encode(v interface{}) error {
+ rv := eindirect(reflect.ValueOf(v))
+ if err := enc.safeEncode(Key([]string{}), rv); err != nil {
+ return err
+ }
+ return enc.w.Flush()
+}
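+
+// Typical usage from client code (the struct and its values are hypothetical):
+//
+//	buf := new(bytes.Buffer)
+//	err := toml.NewEncoder(buf).Encode(struct {
+//		Name string
+//		Port int
+//	}{"garm", 80})
+//	// On success buf contains:
+//	//   Name = "garm"
+//	//   Port = 80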
+
+func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if terr, ok := r.(tomlEncodeError); ok {
+ err = terr.error
+ return
+ }
+ panic(r)
+ }
+ }()
+ enc.encode(key, rv)
+ return nil
+}
+
+func (enc *Encoder) encode(key Key, rv reflect.Value) {
+ // Special case: time.Time needs to be in ISO 8601 format.
+ // Special case: if we can marshal the type to text, then we use that.
+ // Basically, this prevents the encoder from handling these types as
+ // generic structs (or whatever the underlying type of a TextMarshaler is).
+ switch t := rv.Interface().(type) {
+ case time.Time, encoding.TextMarshaler:
+ enc.writeKeyValue(key, rv, false)
+ return
+ // TODO: #76 would make this superfluous after implemented.
+ case Primitive:
+ enc.encode(key, reflect.ValueOf(t.undecoded))
+ return
+ }
+
+ k := rv.Kind()
+ switch k {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+ reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+ reflect.Uint64,
+ reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
+ enc.writeKeyValue(key, rv, false)
+ case reflect.Array, reflect.Slice:
+ if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
+ enc.eArrayOfTables(key, rv)
+ } else {
+ enc.writeKeyValue(key, rv, false)
+ }
+ case reflect.Interface:
+ if rv.IsNil() {
+ return
+ }
+ enc.encode(key, rv.Elem())
+ case reflect.Map:
+ if rv.IsNil() {
+ return
+ }
+ enc.eTable(key, rv)
+ case reflect.Ptr:
+ if rv.IsNil() {
+ return
+ }
+ enc.encode(key, rv.Elem())
+ case reflect.Struct:
+ enc.eTable(key, rv)
+ default:
+ encPanic(fmt.Errorf("unsupported type for key '%s': %s", key, k))
+ }
+}
+
+// eElement encodes any value that can be an array element.
+func (enc *Encoder) eElement(rv reflect.Value) {
+ switch v := rv.Interface().(type) {
+ case time.Time: // Using TextMarshaler adds extra quotes, which we don't want.
+ format := time.RFC3339Nano
+ switch v.Location() {
+ case internal.LocalDatetime:
+ format = "2006-01-02T15:04:05.999999999"
+ case internal.LocalDate:
+ format = "2006-01-02"
+ case internal.LocalTime:
+ format = "15:04:05.999999999"
+ }
+ switch v.Location() {
+ default:
+ enc.wf(v.Format(format))
+ case internal.LocalDatetime, internal.LocalDate, internal.LocalTime:
+ enc.wf(v.In(time.UTC).Format(format))
+ }
+ return
+ case encoding.TextMarshaler:
+ // Use text marshaler if it's available for this value.
+ if s, err := v.MarshalText(); err != nil {
+ encPanic(err)
+ } else {
+ enc.writeQuoted(string(s))
+ }
+ return
+ }
+
+ switch rv.Kind() {
+ case reflect.String:
+ enc.writeQuoted(rv.String())
+ case reflect.Bool:
+ enc.wf(strconv.FormatBool(rv.Bool()))
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ enc.wf(strconv.FormatInt(rv.Int(), 10))
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ enc.wf(strconv.FormatUint(rv.Uint(), 10))
+ case reflect.Float32:
+ f := rv.Float()
+ if math.IsNaN(f) {
+ enc.wf("nan")
+ } else if math.IsInf(f, 0) {
+ enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)])
+ } else {
+ enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32)))
+ }
+ case reflect.Float64:
+ f := rv.Float()
+ if math.IsNaN(f) {
+ enc.wf("nan")
+ } else if math.IsInf(f, 0) {
+ enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)])
+ } else {
+ enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64)))
+ }
+ case reflect.Array, reflect.Slice:
+ enc.eArrayOrSliceElement(rv)
+ case reflect.Struct:
+ enc.eStruct(nil, rv, true)
+ case reflect.Map:
+ enc.eMap(nil, rv, true)
+ case reflect.Interface:
+ enc.eElement(rv.Elem())
+ default:
+ encPanic(fmt.Errorf("unexpected primitive type: %T", rv.Interface()))
+ }
+}
+
+// By the TOML spec, all floats must have a decimal with at least one number on
+// either side.
+func floatAddDecimal(fstr string) string {
+ if !strings.Contains(fstr, ".") {
+ return fstr + ".0"
+ }
+ return fstr
+}
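+
+// A couple of illustrative cases:
+//
+//	floatAddDecimal("3")    // "3.0"
+//	floatAddDecimal("3.14") // "3.14"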
+
+func (enc *Encoder) writeQuoted(s string) {
+ enc.wf("\"%s\"", quotedReplacer.Replace(s))
+}
+
+func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
+ length := rv.Len()
+ enc.wf("[")
+ for i := 0; i < length; i++ {
+ elem := rv.Index(i)
+ enc.eElement(elem)
+ if i != length-1 {
+ enc.wf(", ")
+ }
+ }
+ enc.wf("]")
+}
+
+func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
+ if len(key) == 0 {
+ encPanic(errNoKey)
+ }
+ for i := 0; i < rv.Len(); i++ {
+ trv := rv.Index(i)
+ if isNil(trv) {
+ continue
+ }
+ enc.newline()
+ enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
+ enc.newline()
+ enc.eMapOrStruct(key, trv, false)
+ }
+}
+
+func (enc *Encoder) eTable(key Key, rv reflect.Value) {
+ if len(key) == 1 {
+ // Output an extra newline between top-level tables.
+ // (The newline isn't written if nothing else has been written though.)
+ enc.newline()
+ }
+ if len(key) > 0 {
+ enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
+ enc.newline()
+ }
+ enc.eMapOrStruct(key, rv, false)
+}
+
+func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) {
+ switch rv := eindirect(rv); rv.Kind() {
+ case reflect.Map:
+ enc.eMap(key, rv, inline)
+ case reflect.Struct:
+ enc.eStruct(key, rv, inline)
+ default:
+ // Should never happen?
+ panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
+ }
+}
+
+func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
+ rt := rv.Type()
+ if rt.Key().Kind() != reflect.String {
+ encPanic(errNonString)
+ }
+
+ // Sort keys so that we have deterministic output. And write keys directly
+ // underneath this key first, before writing sub-structs or sub-maps.
+ var mapKeysDirect, mapKeysSub []string
+ for _, mapKey := range rv.MapKeys() {
+ k := mapKey.String()
+ if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
+ mapKeysSub = append(mapKeysSub, k)
+ } else {
+ mapKeysDirect = append(mapKeysDirect, k)
+ }
+ }
+
+ var writeMapKeys = func(mapKeys []string, trailC bool) {
+ sort.Strings(mapKeys)
+ for i, mapKey := range mapKeys {
+ val := rv.MapIndex(reflect.ValueOf(mapKey))
+ if isNil(val) {
+ continue
+ }
+
+ if inline {
+ enc.writeKeyValue(Key{mapKey}, val, true)
+ if trailC || i != len(mapKeys)-1 {
+ enc.wf(", ")
+ }
+ } else {
+ enc.encode(key.add(mapKey), val)
+ }
+ }
+ }
+
+ if inline {
+ enc.wf("{")
+ }
+ writeMapKeys(mapKeysDirect, len(mapKeysSub) > 0)
+ writeMapKeys(mapKeysSub, false)
+ if inline {
+ enc.wf("}")
+ }
+}
+
+func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
+ // Write keys for fields directly under this key first, because if we write
+ // a field that creates a new table then all keys under it will be in that
+ // table (not the one we're writing here).
+ //
+ // Fields is a [][]int: for fieldsDirect this always has one entry (the
+ // struct index). For fieldsSub it contains two entries: the parent field
+ // index from rv, and the field indexes for the fields of the sub-struct.
+ var (
+ rt = rv.Type()
+ fieldsDirect, fieldsSub [][]int
+ addFields func(rt reflect.Type, rv reflect.Value, start []int)
+ )
+ addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
+ for i := 0; i < rt.NumField(); i++ {
+ f := rt.Field(i)
+ if f.PkgPath != "" && !f.Anonymous { /// Skip unexported fields.
+ continue
+ }
+
+ frv := rv.Field(i)
+
+ // Treat anonymous struct fields with tag names as though they are
+ // not anonymous, like encoding/json does.
+ //
+ // Non-struct anonymous fields use the normal encoding logic.
+ if f.Anonymous {
+ t := f.Type
+ switch t.Kind() {
+ case reflect.Struct:
+ if getOptions(f.Tag).name == "" {
+ addFields(t, frv, append(start, f.Index...))
+ continue
+ }
+ case reflect.Ptr:
+ if t.Elem().Kind() == reflect.Struct && getOptions(f.Tag).name == "" {
+ if !frv.IsNil() {
+ addFields(t.Elem(), frv.Elem(), append(start, f.Index...))
+ }
+ continue
+ }
+ }
+ }
+
+ if typeIsHash(tomlTypeOfGo(frv)) {
+ fieldsSub = append(fieldsSub, append(start, f.Index...))
+ } else {
+ fieldsDirect = append(fieldsDirect, append(start, f.Index...))
+ }
+ }
+ }
+ addFields(rt, rv, nil)
+
+ writeFields := func(fields [][]int) {
+ for _, fieldIndex := range fields {
+ fieldType := rt.FieldByIndex(fieldIndex)
+ fieldVal := rv.FieldByIndex(fieldIndex)
+
+ if isNil(fieldVal) { /// Don't write anything for nil fields.
+ continue
+ }
+
+ opts := getOptions(fieldType.Tag)
+ if opts.skip {
+ continue
+ }
+ keyName := fieldType.Name
+ if opts.name != "" {
+ keyName = opts.name
+ }
+ if opts.omitempty && isEmpty(fieldVal) {
+ continue
+ }
+ if opts.omitzero && isZero(fieldVal) {
+ continue
+ }
+
+ if inline {
+ enc.writeKeyValue(Key{keyName}, fieldVal, true)
+ if fieldIndex[0] != len(fields)-1 {
+ enc.wf(", ")
+ }
+ } else {
+ enc.encode(key.add(keyName), fieldVal)
+ }
+ }
+ }
+
+ if inline {
+ enc.wf("{")
+ }
+ writeFields(fieldsDirect)
+ writeFields(fieldsSub)
+ if inline {
+ enc.wf("}")
+ }
+}
+
+// tomlTypeOfGo returns the TOML type of a Go value. The type may be `nil`,
+// which means no concrete TOML type could be found (for example, for a nil
+// value). It is also used to determine whether the types of array elements
+// are mixed and whether an array contains nil elements, both of which are
+// forbidden.
+func tomlTypeOfGo(rv reflect.Value) tomlType {
+ if isNil(rv) || !rv.IsValid() {
+ return nil
+ }
+ switch rv.Kind() {
+ case reflect.Bool:
+ return tomlBool
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+ reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+ reflect.Uint64:
+ return tomlInteger
+ case reflect.Float32, reflect.Float64:
+ return tomlFloat
+ case reflect.Array, reflect.Slice:
+ if typeEqual(tomlHash, tomlArrayType(rv)) {
+ return tomlArrayHash
+ }
+ return tomlArray
+ case reflect.Ptr, reflect.Interface:
+ return tomlTypeOfGo(rv.Elem())
+ case reflect.String:
+ return tomlString
+ case reflect.Map:
+ return tomlHash
+ case reflect.Struct:
+ switch rv.Interface().(type) {
+ case time.Time:
+ return tomlDatetime
+ case encoding.TextMarshaler:
+ return tomlString
+ default:
+ // Someone used a pointer receiver: we can make it work for pointer
+ // values.
+ if rv.CanAddr() {
+ _, ok := rv.Addr().Interface().(encoding.TextMarshaler)
+ if ok {
+ return tomlString
+ }
+ }
+ return tomlHash
+ }
+ default:
+ _, ok := rv.Interface().(encoding.TextMarshaler)
+ if ok {
+ return tomlString
+ }
+ encPanic(errors.New("unsupported type: " + rv.Kind().String()))
+ panic("") // Need *some* return value
+ }
+}
+
+// tomlArrayType returns the element type of a TOML array. The type returned
+// may be nil if it cannot be determined (e.g., a nil slice or a zero length
+// slice). This function may also panic if it finds a type that cannot be
+// expressed in TOML (such as nil elements, heterogeneous arrays or directly
+// nested arrays of tables).
+func tomlArrayType(rv reflect.Value) tomlType {
+ if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
+ return nil
+ }
+
+ /// Don't allow nil.
+ rvlen := rv.Len()
+ for i := 1; i < rvlen; i++ {
+ if tomlTypeOfGo(rv.Index(i)) == nil {
+ encPanic(errArrayNilElement)
+ }
+ }
+
+ firstType := tomlTypeOfGo(rv.Index(0))
+ if firstType == nil {
+ encPanic(errArrayNilElement)
+ }
+ return firstType
+}
+
+type tagOptions struct {
+ skip bool // "-"
+ name string
+ omitempty bool
+ omitzero bool
+}
+
+func getOptions(tag reflect.StructTag) tagOptions {
+ t := tag.Get("toml")
+ if t == "-" {
+ return tagOptions{skip: true}
+ }
+ var opts tagOptions
+ parts := strings.Split(t, ",")
+ opts.name = parts[0]
+ for _, s := range parts[1:] {
+ switch s {
+ case "omitempty":
+ opts.omitempty = true
+ case "omitzero":
+ opts.omitzero = true
+ }
+ }
+ return opts
+}
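+
+// The options above correspond to struct tags such as the following (the field
+// names are illustrative):
+//
+//	type config struct {
+//		Name    string `toml:"name"`
+//		Secret  string `toml:"-"`                 // never encoded
+//		Comment string `toml:"comment,omitempty"` // skipped when empty
+//		Count   int    `toml:",omitzero"`         // skipped when zero
+//	}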
+
+func isZero(rv reflect.Value) bool {
+ switch rv.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return rv.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return rv.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return rv.Float() == 0.0
+ }
+ return false
+}
+
+func isEmpty(rv reflect.Value) bool {
+ switch rv.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
+ return rv.Len() == 0
+ case reflect.Bool:
+ return !rv.Bool()
+ }
+ return false
+}
+
+func (enc *Encoder) newline() {
+ if enc.hasWritten {
+ enc.wf("\n")
+ }
+}
+
+// Write a key/value pair:
+//
+//	key = <value>
+//
+// If inline is true it won't add a newline at the end.
+func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
+ if len(key) == 0 {
+ encPanic(errNoKey)
+ }
+ enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
+ enc.eElement(val)
+ if !inline {
+ enc.newline()
+ }
+}
+
+func (enc *Encoder) wf(format string, v ...interface{}) {
+ if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
+ encPanic(err)
+ }
+ enc.hasWritten = true
+}
+
+func (enc *Encoder) indentStr(key Key) string {
+ return strings.Repeat(enc.Indent, len(key)-1)
+}
+
+func encPanic(err error) {
+ panic(tomlEncodeError{err})
+}
+
+func eindirect(v reflect.Value) reflect.Value {
+ switch v.Kind() {
+ case reflect.Ptr, reflect.Interface:
+ return eindirect(v.Elem())
+ default:
+ return v
+ }
+}
+
+func isNil(rv reflect.Value) bool {
+ switch rv.Kind() {
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return rv.IsNil()
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/BurntSushi/toml/internal/tz.go b/vendor/github.com/BurntSushi/toml/internal/tz.go
new file mode 100644
index 00000000..022f15bc
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/internal/tz.go
@@ -0,0 +1,36 @@
+package internal
+
+import "time"
+
+// Timezones used for local datetime, date, and time TOML types.
+//
+// The exact way times and dates without a timezone should be interpreted is not
+// well-defined in the TOML specification and left to the implementation. These
+// default to the current local timezone offset of the computer, but this can be
+// changed by changing these variables before decoding.
+//
+// TODO:
+// Ideally we'd like to offer people the ability to configure the used timezone
+// by setting Decoder.Timezone and Encoder.Timezone; however, this is a bit
+// tricky: the reason we use three different variables for this is to support
+// round-tripping – without these specific TZ names we wouldn't know which
+// format to use.
+//
+// There isn't a good way to encode this right now though, and passing this sort
+// of information also ties in to various related issues such as string format
+// encoding, encoding of comments, etc.
+//
+// So, for the time being, just put this in internal until we can write a good
+// comprehensive API for doing all of this.
+//
+// The reason they're exported is that they're referred to from e.g.
+// internal/tag.
+//
+// Note that this behaviour is valid according to the TOML spec as the exact
+// behaviour is left up to implementations.
+var (
+ localOffset = func() int { _, o := time.Now().Zone(); return o }()
+ LocalDatetime = time.FixedZone("datetime-local", localOffset)
+ LocalDate = time.FixedZone("date-local", localOffset)
+ LocalTime = time.FixedZone("time-local", localOffset)
+)
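+
+// For example, after decoding a local date such as `d = 2021-05-01`, the
+// resulting time.Time reports LocalDate as its Location, which is how the
+// encoder knows to write it back without a time-of-day or UTC offset. As a
+// sketch (the decoded map is hypothetical):
+//
+//	t := decoded["d"].(time.Time)
+//	t.Location() == LocalDate // true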
diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go
new file mode 100644
index 00000000..adc4eb5d
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/lex.go
@@ -0,0 +1,1225 @@
+package toml
+
+import (
+ "fmt"
+ "reflect"
+ "runtime"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+type itemType int
+
+const (
+ itemError itemType = iota
+ itemNIL // used in the parser to indicate no type
+ itemEOF
+ itemText
+ itemString
+ itemRawString
+ itemMultilineString
+ itemRawMultilineString
+ itemBool
+ itemInteger
+ itemFloat
+ itemDatetime
+ itemArray // the start of an array
+ itemArrayEnd
+ itemTableStart
+ itemTableEnd
+ itemArrayTableStart
+ itemArrayTableEnd
+ itemKeyStart
+ itemKeyEnd
+ itemCommentStart
+ itemInlineTableStart
+ itemInlineTableEnd
+)
+
+const (
+ eof = 0
+ comma = ','
+ tableStart = '['
+ tableEnd = ']'
+ arrayTableStart = '['
+ arrayTableEnd = ']'
+ tableSep = '.'
+ keySep = '='
+ arrayStart = '['
+ arrayEnd = ']'
+ commentStart = '#'
+ stringStart = '"'
+ stringEnd = '"'
+ rawStringStart = '\''
+ rawStringEnd = '\''
+ inlineTableStart = '{'
+ inlineTableEnd = '}'
+)
+
+type stateFn func(lx *lexer) stateFn
+
+type lexer struct {
+ input string
+ start int
+ pos int
+ line int
+ state stateFn
+ items chan item
+
+ // Allow for backing up up to four runes.
+ // This is necessary because TOML contains 3-rune tokens (""" and ''').
+ prevWidths [4]int
+ nprev int // how many of prevWidths are in use
+ // If we emit an eof, we can still back up, but it is not OK to call
+ // next again.
+ atEOF bool
+
+ // A stack of state functions used to maintain context.
+ // The idea is to reuse parts of the state machine in various places.
+ // For example, values can appear at the top level or within arbitrarily
+ // nested arrays. The last state on the stack is used after a value has
+ // been lexed. Similarly for comments.
+ stack []stateFn
+}
+
+type item struct {
+ typ itemType
+ val string
+ line int
+}
+
+func (lx *lexer) nextItem() item {
+ for {
+ select {
+ case item := <-lx.items:
+ return item
+ default:
+ lx.state = lx.state(lx)
+ //fmt.Printf(" STATE %-24s current: %-10q stack: %s\n", lx.state, lx.current(), lx.stack)
+ }
+ }
+}
+
+func lex(input string) *lexer {
+ lx := &lexer{
+ input: input,
+ state: lexTop,
+ line: 1,
+ items: make(chan item, 10),
+ stack: make([]stateFn, 0, 10),
+ }
+ return lx
+}
+
+func (lx *lexer) push(state stateFn) {
+ lx.stack = append(lx.stack, state)
+}
+
+func (lx *lexer) pop() stateFn {
+ if len(lx.stack) == 0 {
+ return lx.errorf("BUG in lexer: no states to pop")
+ }
+ last := lx.stack[len(lx.stack)-1]
+ lx.stack = lx.stack[0 : len(lx.stack)-1]
+ return last
+}
+
+func (lx *lexer) current() string {
+ return lx.input[lx.start:lx.pos]
+}
+
+func (lx *lexer) emit(typ itemType) {
+ lx.items <- item{typ, lx.current(), lx.line}
+ lx.start = lx.pos
+}
+
+func (lx *lexer) emitTrim(typ itemType) {
+ lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
+ lx.start = lx.pos
+}
+
+func (lx *lexer) next() (r rune) {
+ if lx.atEOF {
+ panic("BUG in lexer: next called after EOF")
+ }
+ if lx.pos >= len(lx.input) {
+ lx.atEOF = true
+ return eof
+ }
+
+ if lx.input[lx.pos] == '\n' {
+ lx.line++
+ }
+ lx.prevWidths[3] = lx.prevWidths[2]
+ lx.prevWidths[2] = lx.prevWidths[1]
+ lx.prevWidths[1] = lx.prevWidths[0]
+ if lx.nprev < 4 {
+ lx.nprev++
+ }
+
+ r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
+ if r == utf8.RuneError {
+ lx.errorf("invalid UTF-8 byte at position %d (line %d): 0x%02x", lx.pos, lx.line, lx.input[lx.pos])
+ return utf8.RuneError
+ }
+
+ lx.prevWidths[0] = w
+ lx.pos += w
+ return r
+}
+
+// ignore skips over the pending input before this point.
+func (lx *lexer) ignore() {
+ lx.start = lx.pos
+}
+
+// backup steps back one rune. Can be called 4 times between calls to next.
+func (lx *lexer) backup() {
+ if lx.atEOF {
+ lx.atEOF = false
+ return
+ }
+ if lx.nprev < 1 {
+ panic("BUG in lexer: backed up too far")
+ }
+ w := lx.prevWidths[0]
+ lx.prevWidths[0] = lx.prevWidths[1]
+ lx.prevWidths[1] = lx.prevWidths[2]
+ lx.prevWidths[2] = lx.prevWidths[3]
+ lx.nprev--
+ lx.pos -= w
+ if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
+ lx.line--
+ }
+}
+
+// accept consumes the next rune if it's equal to `valid`.
+func (lx *lexer) accept(valid rune) bool {
+ if lx.next() == valid {
+ return true
+ }
+ lx.backup()
+ return false
+}
+
+// peek returns but does not consume the next rune in the input.
+func (lx *lexer) peek() rune {
+ r := lx.next()
+ lx.backup()
+ return r
+}
+
+// skip ignores all input that matches the given predicate.
+func (lx *lexer) skip(pred func(rune) bool) {
+ for {
+ r := lx.next()
+ if pred(r) {
+ continue
+ }
+ lx.backup()
+ lx.ignore()
+ return
+ }
+}
+
+// errorf stops all lexing by emitting an error and returning `nil`.
+// Note that any value that is a character is escaped if it's a special
+// character (newlines, tabs, etc.).
+func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
+ lx.items <- item{
+ itemError,
+ fmt.Sprintf(format, values...),
+ lx.line,
+ }
+ return nil
+}
+
+// lexTop consumes elements at the top level of TOML data.
+func lexTop(lx *lexer) stateFn {
+ r := lx.next()
+ if isWhitespace(r) || isNL(r) {
+ return lexSkip(lx, lexTop)
+ }
+ switch r {
+ case commentStart:
+ lx.push(lexTop)
+ return lexCommentStart
+ case tableStart:
+ return lexTableStart
+ case eof:
+ if lx.pos > lx.start {
+ return lx.errorf("unexpected EOF")
+ }
+ lx.emit(itemEOF)
+ return nil
+ }
+
+ // At this point, the only valid item can be a key, so we back up
+ // and let the key lexer do the rest.
+ lx.backup()
+ lx.push(lexTopEnd)
+ return lexKeyStart
+}
+
+// lexTopEnd is entered whenever a top-level item has been consumed. (A value
+// or a table.) It must see only whitespace, and will turn back to lexTop
+// upon a newline. If it sees EOF, it will quit the lexer successfully.
+func lexTopEnd(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case r == commentStart:
+ // a comment will read to a newline for us.
+ lx.push(lexTop)
+ return lexCommentStart
+ case isWhitespace(r):
+ return lexTopEnd
+ case isNL(r):
+ lx.ignore()
+ return lexTop
+ case r == eof:
+ lx.emit(itemEOF)
+ return nil
+ }
+ return lx.errorf(
+ "expected a top-level item to end with a newline, comment, or EOF, but got %q instead",
+ r)
+}
+
+// lexTableStart lexes the beginning of a table. Namely, it makes sure that
+// it starts with a character other than '.' and ']'.
+// It assumes that '[' has already been consumed.
+// It also handles the case that this is an item in an array of tables.
+// e.g., '[[name]]'.
+func lexTableStart(lx *lexer) stateFn {
+ if lx.peek() == arrayTableStart {
+ lx.next()
+ lx.emit(itemArrayTableStart)
+ lx.push(lexArrayTableEnd)
+ } else {
+ lx.emit(itemTableStart)
+ lx.push(lexTableEnd)
+ }
+ return lexTableNameStart
+}
+
+func lexTableEnd(lx *lexer) stateFn {
+ lx.emit(itemTableEnd)
+ return lexTopEnd
+}
+
+func lexArrayTableEnd(lx *lexer) stateFn {
+ if r := lx.next(); r != arrayTableEnd {
+ return lx.errorf(
+ "expected end of table array name delimiter %q, but got %q instead",
+ arrayTableEnd, r)
+ }
+ lx.emit(itemArrayTableEnd)
+ return lexTopEnd
+}
+
+func lexTableNameStart(lx *lexer) stateFn {
+ lx.skip(isWhitespace)
+ switch r := lx.peek(); {
+ case r == tableEnd || r == eof:
+ return lx.errorf("unexpected end of table name (table names cannot be empty)")
+ case r == tableSep:
+ return lx.errorf("unexpected table separator (table names cannot be empty)")
+ case r == stringStart || r == rawStringStart:
+ lx.ignore()
+ lx.push(lexTableNameEnd)
+ return lexQuotedName
+ default:
+ lx.push(lexTableNameEnd)
+ return lexBareName
+ }
+}
+
+// lexTableNameEnd reads the end of a piece of a table name, optionally
+// consuming whitespace.
+func lexTableNameEnd(lx *lexer) stateFn {
+ lx.skip(isWhitespace)
+ switch r := lx.next(); {
+ case isWhitespace(r):
+ return lexTableNameEnd
+ case r == tableSep:
+ lx.ignore()
+ return lexTableNameStart
+ case r == tableEnd:
+ return lx.pop()
+ default:
+ return lx.errorf("expected '.' or ']' to end table name, but got %q instead", r)
+ }
+}
+
+// lexBareName lexes one part of a key or table.
+//
+// It assumes that at least one valid character for the table has already been
+// read.
+//
+// Lexes only one part, e.g. only 'a' inside 'a.b'.
+func lexBareName(lx *lexer) stateFn {
+ r := lx.next()
+ if isBareKeyChar(r) {
+ return lexBareName
+ }
+ lx.backup()
+ lx.emit(itemText)
+ return lx.pop()
+}
+
+// lexQuotedName lexes one quoted part of a key or table name.
+//
+// It assumes the opening quote character has not yet been consumed.
+//
+// Lexes only one part, e.g. only '"a"' inside '"a".b'.
+func lexQuotedName(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case isWhitespace(r):
+ return lexSkip(lx, lexValue)
+ case r == stringStart:
+ lx.ignore() // ignore the '"'
+ return lexString
+ case r == rawStringStart:
+ lx.ignore() // ignore the "'"
+ return lexRawString
+ case r == eof:
+ return lx.errorf("unexpected EOF; expected value")
+ default:
+ return lx.errorf("expected value but found %q instead", r)
+ }
+}
+
+// lexKeyStart consumes all key parts until a '='.
+func lexKeyStart(lx *lexer) stateFn {
+ lx.skip(isWhitespace)
+ switch r := lx.peek(); {
+ case r == '=' || r == eof:
+ return lx.errorf("unexpected '=': key name appears blank")
+ case r == '.':
+ return lx.errorf("unexpected '.': keys cannot start with a '.'")
+ case r == stringStart || r == rawStringStart:
+ lx.ignore()
+ fallthrough
+ default: // Bare key
+ lx.emit(itemKeyStart)
+ return lexKeyNameStart
+ }
+}
+
+func lexKeyNameStart(lx *lexer) stateFn {
+ lx.skip(isWhitespace)
+ switch r := lx.peek(); {
+ case r == '=' || r == eof:
+ return lx.errorf("unexpected '='")
+ case r == '.':
+ return lx.errorf("unexpected '.'")
+ case r == stringStart || r == rawStringStart:
+ lx.ignore()
+ lx.push(lexKeyEnd)
+ return lexQuotedName
+ default:
+ lx.push(lexKeyEnd)
+ return lexBareName
+ }
+}
+
+// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
+// separator).
+func lexKeyEnd(lx *lexer) stateFn {
+ lx.skip(isWhitespace)
+ switch r := lx.next(); {
+ case isWhitespace(r):
+ return lexSkip(lx, lexKeyEnd)
+ case r == eof:
+ return lx.errorf("unexpected EOF; expected key separator %q", keySep)
+ case r == '.':
+ lx.ignore()
+ return lexKeyNameStart
+ case r == '=':
+ lx.emit(itemKeyEnd)
+ return lexSkip(lx, lexValue)
+ default:
+ return lx.errorf("expected '.' or '=', but got %q instead", r)
+ }
+}
+
+// lexValue starts the consumption of a value anywhere a value is expected.
+// lexValue will ignore whitespace.
+// After a value is lexed, the last state on the stack is popped and returned.
+func lexValue(lx *lexer) stateFn {
+ // We allow whitespace to precede a value, but NOT newlines.
+ // In array syntax, the array states are responsible for ignoring newlines.
+ r := lx.next()
+ switch {
+ case isWhitespace(r):
+ return lexSkip(lx, lexValue)
+ case isDigit(r):
+ lx.backup() // avoid an extra state and use the same as above
+ return lexNumberOrDateStart
+ }
+ switch r {
+ case arrayStart:
+ lx.ignore()
+ lx.emit(itemArray)
+ return lexArrayValue
+ case inlineTableStart:
+ lx.ignore()
+ lx.emit(itemInlineTableStart)
+ return lexInlineTableValue
+ case stringStart:
+ if lx.accept(stringStart) {
+ if lx.accept(stringStart) {
+ lx.ignore() // Ignore """
+ return lexMultilineString
+ }
+ lx.backup()
+ }
+ lx.ignore() // ignore the '"'
+ return lexString
+ case rawStringStart:
+ if lx.accept(rawStringStart) {
+ if lx.accept(rawStringStart) {
+ lx.ignore() // Ignore """
+ return lexMultilineRawString
+ }
+ lx.backup()
+ }
+ lx.ignore() // ignore the "'"
+ return lexRawString
+ case '.': // special error case, be kind to users
+ return lx.errorf("floats must start with a digit, not '.'")
+ case 'i', 'n':
+ if (lx.accept('n') && lx.accept('f')) || (lx.accept('a') && lx.accept('n')) {
+ lx.emit(itemFloat)
+ return lx.pop()
+ }
+ case '-', '+':
+ return lexDecimalNumberStart
+ }
+ if unicode.IsLetter(r) {
+ // Be permissive here; lexBool will give a nice error if the
+ // user wrote something like
+ // x = foo
+ // (i.e. not 'true' or 'false' but is something else word-like.)
+ lx.backup()
+ return lexBool
+ }
+ if r == eof {
+ return lx.errorf("unexpected EOF; expected value")
+ }
+ return lx.errorf("expected value but found %q instead", r)
+}
+
+// lexArrayValue consumes one value in an array. It assumes that '[' or ','
+// have already been consumed. All whitespace and newlines are ignored.
+func lexArrayValue(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case isWhitespace(r) || isNL(r):
+ return lexSkip(lx, lexArrayValue)
+ case r == commentStart:
+ lx.push(lexArrayValue)
+ return lexCommentStart
+ case r == comma:
+ return lx.errorf("unexpected comma")
+ case r == arrayEnd:
+ // NOTE(caleb): The spec isn't clear about whether you can have
+ // a trailing comma or not, so we'll allow it.
+ return lexArrayEnd
+ }
+
+ lx.backup()
+ lx.push(lexArrayValueEnd)
+ return lexValue
+}
+
+// lexArrayValueEnd consumes everything between the end of an array value and
+// the next value (or the end of the array): it ignores whitespace and newlines
+// and expects either a ',' or a ']'.
+func lexArrayValueEnd(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case isWhitespace(r) || isNL(r):
+ return lexSkip(lx, lexArrayValueEnd)
+ case r == commentStart:
+ lx.push(lexArrayValueEnd)
+ return lexCommentStart
+ case r == comma:
+ lx.ignore()
+ return lexArrayValue // move on to the next value
+ case r == arrayEnd:
+ return lexArrayEnd
+ }
+ return lx.errorf(
+ "expected a comma or array terminator %q, but got %s instead",
+ arrayEnd, runeOrEOF(r))
+}
+
+// lexArrayEnd finishes the lexing of an array.
+// It assumes that a ']' has just been consumed.
+func lexArrayEnd(lx *lexer) stateFn {
+ lx.ignore()
+ lx.emit(itemArrayEnd)
+ return lx.pop()
+}
+
+// lexInlineTableValue consumes one key/value pair in an inline table.
+// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
+func lexInlineTableValue(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case isWhitespace(r):
+ return lexSkip(lx, lexInlineTableValue)
+ case isNL(r):
+ return lx.errorf("newlines not allowed within inline tables")
+ case r == commentStart:
+ lx.push(lexInlineTableValue)
+ return lexCommentStart
+ case r == comma:
+ return lx.errorf("unexpected comma")
+ case r == inlineTableEnd:
+ return lexInlineTableEnd
+ }
+ lx.backup()
+ lx.push(lexInlineTableValueEnd)
+ return lexKeyStart
+}
+
+// lexInlineTableValueEnd consumes everything between the end of an inline table
+// key/value pair and the next pair (or the end of the table):
+// it ignores whitespace and expects either a ',' or a '}'.
+func lexInlineTableValueEnd(lx *lexer) stateFn {
+ switch r := lx.next(); {
+ case isWhitespace(r):
+ return lexSkip(lx, lexInlineTableValueEnd)
+ case isNL(r):
+ return lx.errorf("newlines not allowed within inline tables")
+ case r == commentStart:
+ lx.push(lexInlineTableValueEnd)
+ return lexCommentStart
+ case r == comma:
+ lx.ignore()
+ lx.skip(isWhitespace)
+ if lx.peek() == '}' {
+ return lx.errorf("trailing comma not allowed in inline tables")
+ }
+ return lexInlineTableValue
+ case r == inlineTableEnd:
+ return lexInlineTableEnd
+ default:
+ return lx.errorf(
+ "expected a comma or an inline table terminator %q, but got %s instead",
+ inlineTableEnd, runeOrEOF(r))
+ }
+}
+
+func runeOrEOF(r rune) string {
+ if r == eof {
+ return "end of file"
+ }
+ return "'" + string(r) + "'"
+}
+
+// lexInlineTableEnd finishes the lexing of an inline table.
+// It assumes that a '}' has just been consumed.
+func lexInlineTableEnd(lx *lexer) stateFn {
+ lx.ignore()
+ lx.emit(itemInlineTableEnd)
+ return lx.pop()
+}
+
+// lexString consumes the inner contents of a string. It assumes that the
+// beginning '"' has already been consumed and ignored.
+func lexString(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case r == eof:
+ return lx.errorf(`unexpected EOF; expected '"'`)
+ case isControl(r) || r == '\r':
+ return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
+ case isNL(r):
+ return lx.errorf("strings cannot contain newlines")
+ case r == '\\':
+ lx.push(lexString)
+ return lexStringEscape
+ case r == stringEnd:
+ lx.backup()
+ lx.emit(itemString)
+ lx.next()
+ lx.ignore()
+ return lx.pop()
+ }
+ return lexString
+}
+
+// lexMultilineString consumes the inner contents of a string. It assumes that
+// the beginning '"""' has already been consumed and ignored.
+func lexMultilineString(lx *lexer) stateFn {
+ r := lx.next()
+ switch r {
+ case eof:
+ return lx.errorf(`unexpected EOF; expected '"""'`)
+ case '\r':
+ if lx.peek() != '\n' {
+ return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
+ }
+ return lexMultilineString
+ case '\\':
+ return lexMultilineStringEscape
+ case stringEnd:
+ /// Found " → try to read two more "".
+ if lx.accept(stringEnd) {
+ if lx.accept(stringEnd) {
+ /// Peek ahead: the string can contain " and "", including at the
+ /// end: """str"""""
+ /// 6 or more at the end, however, is an error.
+ if lx.peek() == stringEnd {
+ /// Check if we already lexed five '"'s; if so, the peeked one makes
+ /// six in a row, which is an error.
+ if strings.HasSuffix(lx.current(), `"""""`) {
+ return lx.errorf(`unexpected '""""""'`)
+ }
+ lx.backup()
+ lx.backup()
+ return lexMultilineString
+ }
+
+ lx.backup() /// backup: don't include the """ in the item.
+ lx.backup()
+ lx.backup()
+ lx.emit(itemMultilineString)
+ lx.next() /// Read over """ again and discard it.
+ lx.next()
+ lx.next()
+ lx.ignore()
+ return lx.pop()
+ }
+ lx.backup()
+ }
+ }
+
+ if isControl(r) {
+ return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
+ }
+ return lexMultilineString
+}
+
+// lexRawString consumes a raw string. Nothing can be escaped in such a string.
+// It assumes that the beginning "'" has already been consumed and ignored.
+func lexRawString(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case r == eof:
+ return lx.errorf(`unexpected EOF; expected "'"`)
+ case isControl(r) || r == '\r':
+ return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
+ case isNL(r):
+ return lx.errorf("strings cannot contain newlines")
+ case r == rawStringEnd:
+ lx.backup()
+ lx.emit(itemRawString)
+ lx.next()
+ lx.ignore()
+ return lx.pop()
+ }
+ return lexRawString
+}
+
+// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
+// a string. It assumes that the beginning "'''" has already been consumed and
+// ignored.
+func lexMultilineRawString(lx *lexer) stateFn {
+ r := lx.next()
+ switch r {
+ case eof:
+ return lx.errorf(`unexpected EOF; expected "'''"`)
+ case '\r':
+ if lx.peek() != '\n' {
+ return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
+ }
+ return lexMultilineRawString
+ case rawStringEnd:
+ /// Found ' → try to read two more ''.
+ if lx.accept(rawStringEnd) {
+ if lx.accept(rawStringEnd) {
+ /// Peek ahead: the string can contain ' and '', including at the
+ /// end: '''str'''''
+ /// 6 or more at the end, however, is an error.
+ if lx.peek() == rawStringEnd {
+ /// Check if we already lexed five "'"s; if so, the peeked one makes
+ /// six in a row, which is an error.
+ if strings.HasSuffix(lx.current(), "'''''") {
+ return lx.errorf(`unexpected "''''''"`)
+ }
+ lx.backup()
+ lx.backup()
+ return lexMultilineRawString
+ }
+
+ lx.backup() /// backup: don't include the ''' in the item.
+ lx.backup()
+ lx.backup()
+ lx.emit(itemRawMultilineString)
+ lx.next() /// Read over ''' again and discard it.
+ lx.next()
+ lx.next()
+ lx.ignore()
+ return lx.pop()
+ }
+ lx.backup()
+ }
+ }
+
+ if isControl(r) {
+ return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
+ }
+ return lexMultilineRawString
+}
+
+// lexMultilineStringEscape consumes an escaped character. It assumes that the
+// preceding '\\' has already been consumed.
+func lexMultilineStringEscape(lx *lexer) stateFn {
+ // Handle the special case first:
+ if isNL(lx.next()) {
+ return lexMultilineString
+ }
+ lx.backup()
+ lx.push(lexMultilineString)
+ return lexStringEscape(lx)
+}
+
+func lexStringEscape(lx *lexer) stateFn {
+ r := lx.next()
+ switch r {
+ case 'b':
+ fallthrough
+ case 't':
+ fallthrough
+ case 'n':
+ fallthrough
+ case 'f':
+ fallthrough
+ case 'r':
+ fallthrough
+ case '"':
+ fallthrough
+ case ' ', '\t':
+ // Inside """ .. """ strings you can use \ to escape newlines, and any
+ // amount of whitespace can be between the \ and \n.
+ fallthrough
+ case '\\':
+ return lx.pop()
+ case 'u':
+ return lexShortUnicodeEscape
+ case 'U':
+ return lexLongUnicodeEscape
+ }
+ return lx.errorf("invalid escape character %q; only the following escape characters are allowed: "+
+ `\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
+}
+
+func lexShortUnicodeEscape(lx *lexer) stateFn {
+ var r rune
+ for i := 0; i < 4; i++ {
+ r = lx.next()
+ if !isHexadecimal(r) {
+ return lx.errorf(
+ `expected four hexadecimal digits after '\u', but got %q instead`,
+ lx.current())
+ }
+ }
+ return lx.pop()
+}
+
+func lexLongUnicodeEscape(lx *lexer) stateFn {
+ var r rune
+ for i := 0; i < 8; i++ {
+ r = lx.next()
+ if !isHexadecimal(r) {
+ return lx.errorf(
+ `expected eight hexadecimal digits after '\U', but got %q instead`,
+ lx.current())
+ }
+ }
+ return lx.pop()
+}
+
+// lexNumberOrDateStart processes the first character of a value which begins
+// with a digit. It exists to catch values starting with '0', so that
+// lexBaseNumberOrDate can differentiate base prefixed integers from other
+// types.
+func lexNumberOrDateStart(lx *lexer) stateFn {
+ r := lx.next()
+ switch r {
+ case '0':
+ return lexBaseNumberOrDate
+ }
+
+ if !isDigit(r) {
+ // The only way to reach this state is if the value starts
+ // with a digit, so specifically treat anything else as an
+ // error.
+ return lx.errorf("expected a digit but got %q", r)
+ }
+
+ return lexNumberOrDate
+}
+
+// lexNumberOrDate consumes either an integer, float or datetime.
+func lexNumberOrDate(lx *lexer) stateFn {
+ r := lx.next()
+ if isDigit(r) {
+ return lexNumberOrDate
+ }
+ switch r {
+ case '-', ':':
+ return lexDatetime
+ case '_':
+ return lexDecimalNumber
+ case '.', 'e', 'E':
+ return lexFloat
+ }
+
+ lx.backup()
+ lx.emit(itemInteger)
+ return lx.pop()
+}
+
+// lexDatetime consumes a Datetime, to a first approximation.
+// The parser validates that it matches one of the accepted formats.
+func lexDatetime(lx *lexer) stateFn {
+ r := lx.next()
+ if isDigit(r) {
+ return lexDatetime
+ }
+ switch r {
+ case '-', ':', 'T', 't', ' ', '.', 'Z', 'z', '+':
+ return lexDatetime
+ }
+
+ lx.backup()
+ lx.emitTrim(itemDatetime)
+ return lx.pop()
+}
+
+// lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix.
+func lexHexInteger(lx *lexer) stateFn {
+ r := lx.next()
+ if isHexadecimal(r) {
+ return lexHexInteger
+ }
+ switch r {
+ case '_':
+ return lexHexInteger
+ }
+
+ lx.backup()
+ lx.emit(itemInteger)
+ return lx.pop()
+}
+
+// lexOctalInteger consumes an octal integer after seeing the '0o' prefix.
+func lexOctalInteger(lx *lexer) stateFn {
+ r := lx.next()
+ if isOctal(r) {
+ return lexOctalInteger
+ }
+ switch r {
+ case '_':
+ return lexOctalInteger
+ }
+
+ lx.backup()
+ lx.emit(itemInteger)
+ return lx.pop()
+}
+
+// lexBinaryInteger consumes a binary integer after seeing the '0b' prefix.
+func lexBinaryInteger(lx *lexer) stateFn {
+ r := lx.next()
+ if isBinary(r) {
+ return lexBinaryInteger
+ }
+ switch r {
+ case '_':
+ return lexBinaryInteger
+ }
+
+ lx.backup()
+ lx.emit(itemInteger)
+ return lx.pop()
+}
+
+// lexDecimalNumber consumes a decimal float or integer.
+func lexDecimalNumber(lx *lexer) stateFn {
+ r := lx.next()
+ if isDigit(r) {
+ return lexDecimalNumber
+ }
+ switch r {
+ case '.', 'e', 'E':
+ return lexFloat
+ case '_':
+ return lexDecimalNumber
+ }
+
+ lx.backup()
+ lx.emit(itemInteger)
+ return lx.pop()
+}
+
+// lexDecimalNumberStart consumes the first digit of a number beginning with a sign.
+// It assumes the sign has already been consumed. Values which start with a sign
+// are only allowed to be decimal integers or floats.
+//
+// The special "nan" and "inf" values are also recognized.
+func lexDecimalNumberStart(lx *lexer) stateFn {
+ r := lx.next()
+
+ // Special error cases to give users better error messages
+ switch r {
+ case 'i':
+ if !lx.accept('n') || !lx.accept('f') {
+ return lx.errorf("invalid float: '%s'", lx.current())
+ }
+ lx.emit(itemFloat)
+ return lx.pop()
+ case 'n':
+ if !lx.accept('a') || !lx.accept('n') {
+ return lx.errorf("invalid float: '%s'", lx.current())
+ }
+ lx.emit(itemFloat)
+ return lx.pop()
+ case '0':
+ p := lx.peek()
+ switch p {
+ case 'b', 'o', 'x':
+ return lx.errorf("cannot use sign with non-decimal numbers: '%s%c'", lx.current(), p)
+ }
+ case '.':
+ return lx.errorf("floats must start with a digit, not '.'")
+ }
+
+ if isDigit(r) {
+ return lexDecimalNumber
+ }
+
+ return lx.errorf("expected a digit but got %q", r)
+}
+
+// lexBaseNumberOrDate differentiates between the possible values which
+// start with '0'. It assumes that before reaching this state, the initial '0'
+// has been consumed.
+func lexBaseNumberOrDate(lx *lexer) stateFn {
+ r := lx.next()
+ // Note: All datetimes start with at least two digits, so we don't
+ // handle date characters (':', '-', etc.) here.
+ if isDigit(r) {
+ return lexNumberOrDate
+ }
+ switch r {
+ case '_':
+ // Can only be decimal, because there can't be an underscore
+ // between the '0' and the base designator, and dates can't
+ // contain underscores.
+ return lexDecimalNumber
+ case '.', 'e', 'E':
+ return lexFloat
+ case 'b':
+ r = lx.peek()
+ if !isBinary(r) {
+ lx.errorf("not a binary number: '%s%c'", lx.current(), r)
+ }
+ return lexBinaryInteger
+ case 'o':
+ r = lx.peek()
+ if !isOctal(r) {
+ lx.errorf("not an octal number: '%s%c'", lx.current(), r)
+ }
+ return lexOctalInteger
+ case 'x':
+ r = lx.peek()
+ if !isHexadecimal(r) {
+ lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r)
+ }
+ return lexHexInteger
+ }
+
+ lx.backup()
+ lx.emit(itemInteger)
+ return lx.pop()
+}
+
+// lexFloat consumes the elements of a float. It allows any sequence of
+// float-like characters, so floats emitted by the lexer are only a first
+// approximation and must be validated by the parser.
+func lexFloat(lx *lexer) stateFn {
+ r := lx.next()
+ if isDigit(r) {
+ return lexFloat
+ }
+ switch r {
+ case '_', '.', '-', '+', 'e', 'E':
+ return lexFloat
+ }
+
+ lx.backup()
+ lx.emit(itemFloat)
+ return lx.pop()
+}
+
+// lexBool consumes a bool string: 'true' or 'false'.
+func lexBool(lx *lexer) stateFn {
+ var rs []rune
+ for {
+ r := lx.next()
+ if !unicode.IsLetter(r) {
+ lx.backup()
+ break
+ }
+ rs = append(rs, r)
+ }
+ s := string(rs)
+ switch s {
+ case "true", "false":
+ lx.emit(itemBool)
+ return lx.pop()
+ }
+ return lx.errorf("expected value but found %q instead", s)
+}
+
+// lexCommentStart begins the lexing of a comment. It will emit
+// itemCommentStart and consume no characters, passing control to lexComment.
+func lexCommentStart(lx *lexer) stateFn {
+ lx.ignore()
+ lx.emit(itemCommentStart)
+ return lexComment
+}
+
+// lexComment lexes an entire comment. It assumes that '#' has been consumed.
+// It will consume *up to* the first newline character, and pass control
+// back to the last state on the stack.
+func lexComment(lx *lexer) stateFn {
+ switch r := lx.next(); {
+ case isNL(r) || r == eof:
+ lx.backup()
+ lx.emit(itemText)
+ return lx.pop()
+ case isControl(r):
+ return lx.errorf("control characters are not allowed inside comments: '0x%02x'", r)
+ default:
+ return lexComment
+ }
+}
+
+// lexSkip ignores all slurped input and moves on to the next state.
+func lexSkip(lx *lexer, nextState stateFn) stateFn {
+ lx.ignore()
+ return nextState
+}
+
+// isWhitespace returns true if `r` is a whitespace character according
+// to the spec.
+func isWhitespace(r rune) bool {
+ return r == '\t' || r == ' '
+}
+
+func isNL(r rune) bool {
+ return r == '\n' || r == '\r'
+}
+
+// Control characters except \n, \t
+func isControl(r rune) bool {
+ switch r {
+ case '\t', '\r', '\n':
+ return false
+ default:
+ return (r >= 0x00 && r <= 0x1f) || r == 0x7f
+ }
+}
+
+func isDigit(r rune) bool {
+ return r >= '0' && r <= '9'
+}
+
+func isHexadecimal(r rune) bool {
+ return (r >= '0' && r <= '9') ||
+ (r >= 'a' && r <= 'f') ||
+ (r >= 'A' && r <= 'F')
+}
+
+func isOctal(r rune) bool {
+ return r >= '0' && r <= '7'
+}
+
+func isBinary(r rune) bool {
+ return r == '0' || r == '1'
+}
+
+func isBareKeyChar(r rune) bool {
+ return (r >= 'A' && r <= 'Z') ||
+ (r >= 'a' && r <= 'z') ||
+ (r >= '0' && r <= '9') ||
+ r == '_' ||
+ r == '-'
+}
+
+func (s stateFn) String() string {
+ name := runtime.FuncForPC(reflect.ValueOf(s).Pointer()).Name()
+ if i := strings.LastIndexByte(name, '.'); i > -1 {
+ name = name[i+1:]
+ }
+ if s == nil {
+ name = ""
+ }
+ return name + "()"
+}
+
+func (itype itemType) String() string {
+ switch itype {
+ case itemError:
+ return "Error"
+ case itemNIL:
+ return "NIL"
+ case itemEOF:
+ return "EOF"
+ case itemText:
+ return "Text"
+ case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
+ return "String"
+ case itemBool:
+ return "Bool"
+ case itemInteger:
+ return "Integer"
+ case itemFloat:
+ return "Float"
+ case itemDatetime:
+ return "DateTime"
+ case itemTableStart:
+ return "TableStart"
+ case itemTableEnd:
+ return "TableEnd"
+ case itemKeyStart:
+ return "KeyStart"
+ case itemKeyEnd:
+ return "KeyEnd"
+ case itemArray:
+ return "Array"
+ case itemArrayEnd:
+ return "ArrayEnd"
+ case itemCommentStart:
+ return "CommentStart"
+ case itemInlineTableStart:
+ return "InlineTableStart"
+ case itemInlineTableEnd:
+ return "InlineTableEnd"
+ }
+ panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
+}
+
+func (item item) String() string {
+ return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
+}
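
As a point of reference for the file above, the lexer is built on the state-function pattern: every stateFn consumes some input and returns the next state, with the table/value states pushed and popped from a stack. A minimal, self-contained sketch of that pattern, not taken from the vendored package (the miniLexer type and lexDigits function are invented for illustration):

package main

import "fmt"

// stateFn mirrors the pattern used by the TOML lexer: each state consumes some
// input and returns the next state; nil stops the loop.
type stateFn func(l *miniLexer) stateFn

type miniLexer struct {
	input string
	pos   int
	out   []string
}

// lexDigits reads one run of digits, records it, and either continues with
// itself (after skipping a separator) or terminates.
func lexDigits(l *miniLexer) stateFn {
	start := l.pos
	for l.pos < len(l.input) && l.input[l.pos] >= '0' && l.input[l.pos] <= '9' {
		l.pos++
	}
	l.out = append(l.out, l.input[start:l.pos])
	if l.pos < len(l.input) {
		l.pos++ // skip the ',' separator
		return lexDigits
	}
	return nil
}

func main() {
	l := &miniLexer{input: "10,20,30"}
	for state := stateFn(lexDigits); state != nil; {
		state = state(l)
	}
	fmt.Println(l.out) // [10 20 30]
}
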
diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go
new file mode 100644
index 00000000..d9ae5db9
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/parse.go
@@ -0,0 +1,739 @@
+package toml
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/BurntSushi/toml/internal"
+)
+
+type parser struct {
+ mapping map[string]interface{}
+ types map[string]tomlType
+ lx *lexer
+
+ ordered []Key // List of keys in the order that they appear in the TOML data.
+ context Key // Full key for the current hash in scope.
+ currentKey string // Base key name for everything except hashes.
+ approxLine int // Rough approximation of line number
+ implicits map[string]bool // Record implied keys (e.g. 'key.group.names').
+}
+
+// ParseError is used when a file can't be parsed: for example invalid integer
+// literals, duplicate keys, etc.
+type ParseError struct {
+ Message string
+ Line int
+ LastKey string
+}
+
+func (pe ParseError) Error() string {
+ return fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
+ pe.Line, pe.LastKey, pe.Message)
+}
+
+func parse(data string) (p *parser, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ var ok bool
+ if err, ok = r.(ParseError); ok {
+ return
+ }
+ panic(r)
+ }
+ }()
+
+ // Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString()
+ // which mangles stuff.
+ if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") {
+ data = data[2:]
+ }
+
+ // Examine first few bytes for NULL bytes; this probably means it's a UTF-16
+ // file (second byte in surrogate pair being NULL). Again, do this here to
+ // avoid having to deal with UTF-8/16 stuff in the lexer.
+ ex := 6
+ if len(data) < 6 {
+ ex = len(data)
+ }
+ if strings.ContainsRune(data[:ex], 0) {
+ return nil, errors.New("files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8")
+ }
+
+ p = &parser{
+ mapping: make(map[string]interface{}),
+ types: make(map[string]tomlType),
+ lx: lex(data),
+ ordered: make([]Key, 0),
+ implicits: make(map[string]bool),
+ }
+ for {
+ item := p.next()
+ if item.typ == itemEOF {
+ break
+ }
+ p.topLevel(item)
+ }
+
+ return p, nil
+}
+
+func (p *parser) panicf(format string, v ...interface{}) {
+ msg := fmt.Sprintf(format, v...)
+ panic(ParseError{
+ Message: msg,
+ Line: p.approxLine,
+ LastKey: p.current(),
+ })
+}
+
+func (p *parser) next() item {
+ it := p.lx.nextItem()
+ //fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.line, it.val)
+ if it.typ == itemError {
+ p.panicf("%s", it.val)
+ }
+ return it
+}
+
+func (p *parser) bug(format string, v ...interface{}) {
+ panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
+}
+
+func (p *parser) expect(typ itemType) item {
+ it := p.next()
+ p.assertEqual(typ, it.typ)
+ return it
+}
+
+func (p *parser) assertEqual(expected, got itemType) {
+ if expected != got {
+ p.bug("Expected '%s' but got '%s'.", expected, got)
+ }
+}
+
+func (p *parser) topLevel(item item) {
+ switch item.typ {
+ case itemCommentStart: // # ..
+ p.approxLine = item.line
+ p.expect(itemText)
+ case itemTableStart: // [ .. ]
+ name := p.next()
+ p.approxLine = name.line
+
+ var key Key
+ for ; name.typ != itemTableEnd && name.typ != itemEOF; name = p.next() {
+ key = append(key, p.keyString(name))
+ }
+ p.assertEqual(itemTableEnd, name.typ)
+
+ p.addContext(key, false)
+ p.setType("", tomlHash)
+ p.ordered = append(p.ordered, key)
+ case itemArrayTableStart: // [[ .. ]]
+ name := p.next()
+ p.approxLine = name.line
+
+ var key Key
+ for ; name.typ != itemArrayTableEnd && name.typ != itemEOF; name = p.next() {
+ key = append(key, p.keyString(name))
+ }
+ p.assertEqual(itemArrayTableEnd, name.typ)
+
+ p.addContext(key, true)
+ p.setType("", tomlArrayHash)
+ p.ordered = append(p.ordered, key)
+ case itemKeyStart: // key = ..
+ outerContext := p.context
+ /// Read all the key parts (e.g. 'a' and 'b' in 'a.b')
+ k := p.next()
+ p.approxLine = k.line
+ var key Key
+ for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() {
+ key = append(key, p.keyString(k))
+ }
+ p.assertEqual(itemKeyEnd, k.typ)
+
+ /// The current key is the last part.
+ p.currentKey = key[len(key)-1]
+
+ /// All the other parts (if any) are the context; need to set each part
+ /// as implicit.
+ context := key[:len(key)-1]
+ for i := range context {
+ p.addImplicitContext(append(p.context, context[i:i+1]...))
+ }
+
+ /// Set value.
+ val, typ := p.value(p.next(), false)
+ p.set(p.currentKey, val, typ)
+ p.ordered = append(p.ordered, p.context.add(p.currentKey))
+
+ /// Remove the context we added (preserving any context from [tbl] lines).
+ p.context = outerContext
+ p.currentKey = ""
+ default:
+ p.bug("Unexpected type at top level: %s", item.typ)
+ }
+}
+
+// Gets a string for a key (or part of a key in a table name).
+func (p *parser) keyString(it item) string {
+ switch it.typ {
+ case itemText:
+ return it.val
+ case itemString, itemMultilineString,
+ itemRawString, itemRawMultilineString:
+ s, _ := p.value(it, false)
+ return s.(string)
+ default:
+ p.bug("Unexpected key type: %s", it.typ)
+ }
+ panic("unreachable")
+}
+
+var datetimeRepl = strings.NewReplacer(
+ "z", "Z",
+ "t", "T",
+ " ", "T")
+
+// value translates an expected value from the lexer into a Go value wrapped
+// as an empty interface.
+func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) {
+ switch it.typ {
+ case itemString:
+ return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
+ case itemMultilineString:
+ return p.replaceEscapes(stripFirstNewline(stripEscapedNewlines(it.val))), p.typeOfPrimitive(it)
+ case itemRawString:
+ return it.val, p.typeOfPrimitive(it)
+ case itemRawMultilineString:
+ return stripFirstNewline(it.val), p.typeOfPrimitive(it)
+ case itemInteger:
+ return p.valueInteger(it)
+ case itemFloat:
+ return p.valueFloat(it)
+ case itemBool:
+ switch it.val {
+ case "true":
+ return true, p.typeOfPrimitive(it)
+ case "false":
+ return false, p.typeOfPrimitive(it)
+ default:
+ p.bug("Expected boolean value, but got '%s'.", it.val)
+ }
+ case itemDatetime:
+ return p.valueDatetime(it)
+ case itemArray:
+ return p.valueArray(it)
+ case itemInlineTableStart:
+ return p.valueInlineTable(it, parentIsArray)
+ default:
+ p.bug("Unexpected value type: %s", it.typ)
+ }
+ panic("unreachable")
+}
+
+func (p *parser) valueInteger(it item) (interface{}, tomlType) {
+ if !numUnderscoresOK(it.val) {
+ p.panicf("Invalid integer %q: underscores must be surrounded by digits", it.val)
+ }
+ if numHasLeadingZero(it.val) {
+ p.panicf("Invalid integer %q: cannot have leading zeroes", it.val)
+ }
+
+ num, err := strconv.ParseInt(it.val, 0, 64)
+ if err != nil {
+ // Distinguish integer values. Normally, it'd be a bug if the lexer
+ // provides an invalid integer, but it's possible that the number is
+ // out of range of valid values (which the lexer cannot determine).
+ // So mark the former as a bug but the latter as a legitimate user
+ // error.
+ if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
+ p.panicf("Integer '%s' is out of the range of 64-bit signed integers.", it.val)
+ } else {
+ p.bug("Expected integer value, but got '%s'.", it.val)
+ }
+ }
+ return num, p.typeOfPrimitive(it)
+}
+
+func (p *parser) valueFloat(it item) (interface{}, tomlType) {
+ parts := strings.FieldsFunc(it.val, func(r rune) bool {
+ switch r {
+ case '.', 'e', 'E':
+ return true
+ }
+ return false
+ })
+ for _, part := range parts {
+ if !numUnderscoresOK(part) {
+ p.panicf("Invalid float %q: underscores must be surrounded by digits", it.val)
+ }
+ }
+ if len(parts) > 0 && numHasLeadingZero(parts[0]) {
+ p.panicf("Invalid float %q: cannot have leading zeroes", it.val)
+ }
+ if !numPeriodsOK(it.val) {
+ // As a special case, numbers like '123.' or '1.e2',
+ // which are valid as far as Go/strconv are concerned,
+ // must be rejected because TOML says that a fractional
+ // part consists of '.' followed by 1+ digits.
+ p.panicf("Invalid float %q: '.' must be followed by one or more digits", it.val)
+ }
+ val := strings.Replace(it.val, "_", "", -1)
+ if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does.
+ val = "nan"
+ }
+ num, err := strconv.ParseFloat(val, 64)
+ if err != nil {
+ if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
+ p.panicf("Float '%s' is out of the range of 64-bit IEEE-754 floating-point numbers.", it.val)
+ } else {
+ p.panicf("Invalid float value: %q", it.val)
+ }
+ }
+ return num, p.typeOfPrimitive(it)
+}
+
+var dtTypes = []struct {
+ fmt string
+ zone *time.Location
+}{
+ {time.RFC3339Nano, time.Local},
+ {"2006-01-02T15:04:05.999999999", internal.LocalDatetime},
+ {"2006-01-02", internal.LocalDate},
+ {"15:04:05.999999999", internal.LocalTime},
+}
+
+func (p *parser) valueDatetime(it item) (interface{}, tomlType) {
+ it.val = datetimeRepl.Replace(it.val)
+ var (
+ t time.Time
+ ok bool
+ err error
+ )
+ for _, dt := range dtTypes {
+ t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone)
+ if err == nil {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ p.panicf("Invalid TOML Datetime: %q.", it.val)
+ }
+ return t, p.typeOfPrimitive(it)
+}
+
+func (p *parser) valueArray(it item) (interface{}, tomlType) {
+ p.setType(p.currentKey, tomlArray)
+
+ // p.setType(p.currentKey, typ)
+ var (
+ array []interface{}
+ types []tomlType
+ )
+ for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
+ if it.typ == itemCommentStart {
+ p.expect(itemText)
+ continue
+ }
+
+ val, typ := p.value(it, true)
+ array = append(array, val)
+ types = append(types, typ)
+ }
+ return array, tomlArray
+}
+
+func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tomlType) {
+ var (
+ hash = make(map[string]interface{})
+ outerContext = p.context
+ outerKey = p.currentKey
+ )
+
+ p.context = append(p.context, p.currentKey)
+ prevContext := p.context
+ p.currentKey = ""
+
+ p.addImplicit(p.context)
+ p.addContext(p.context, parentIsArray)
+
+ /// Loop over all table key/value pairs.
+ for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
+ if it.typ == itemCommentStart {
+ p.expect(itemText)
+ continue
+ }
+
+ /// Read all key parts.
+ k := p.next()
+ p.approxLine = k.line
+ var key Key
+ for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() {
+ key = append(key, p.keyString(k))
+ }
+ p.assertEqual(itemKeyEnd, k.typ)
+
+ /// The current key is the last part.
+ p.currentKey = key[len(key)-1]
+
+ /// All the other parts (if any) are the context; need to set each part
+ /// as implicit.
+ context := key[:len(key)-1]
+ for i := range context {
+ p.addImplicitContext(append(p.context, context[i:i+1]...))
+ }
+
+ /// Set the value.
+ val, typ := p.value(p.next(), false)
+ p.set(p.currentKey, val, typ)
+ p.ordered = append(p.ordered, p.context.add(p.currentKey))
+ hash[p.currentKey] = val
+
+ /// Restore context.
+ p.context = prevContext
+ }
+ p.context = outerContext
+ p.currentKey = outerKey
+ return hash, tomlHash
+}
+
+// numHasLeadingZero checks if this number has leading zeroes, allowing for '0',
+// +/- signs, and base prefixes.
+func numHasLeadingZero(s string) bool {
+ if len(s) > 1 && s[0] == '0' && isDigit(rune(s[1])) { // >1 to allow "0" and isDigit to allow 0x
+ return true
+ }
+ if len(s) > 2 && (s[0] == '-' || s[0] == '+') && s[1] == '0' {
+ return true
+ }
+ return false
+}
+
+// numUnderscoresOK checks whether each underscore in s is surrounded by
+// characters that are not underscores.
+func numUnderscoresOK(s string) bool {
+ switch s {
+ case "nan", "+nan", "-nan", "inf", "-inf", "+inf":
+ return true
+ }
+ accept := false
+ for _, r := range s {
+ if r == '_' {
+ if !accept {
+ return false
+ }
+ }
+
+ // isHexadecimal is a superset of all the permissible characters
+ // surrounding an underscore.
+ accept = isHexadecimal(r)
+ }
+ return accept
+}
+
+// numPeriodsOK checks whether every period in s is followed by a digit.
+func numPeriodsOK(s string) bool {
+ period := false
+ for _, r := range s {
+ if period && !isDigit(r) {
+ return false
+ }
+ period = r == '.'
+ }
+ return !period
+}
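
The two helpers above enforce TOML's rules for underscores, leading zeroes, and fractional parts in numeric literals. A rough usage sketch of which inputs pass or fail these checks, assuming only the package's exported toml.Decode entry point (defined in decode.go elsewhere in this patch):

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	cases := []string{
		"a = 1_000",  // ok: every underscore is surrounded by digits
		"a = 1__000", // rejected by numUnderscoresOK
		"a = 012",    // rejected by numHasLeadingZero
		"a = 3.",     // rejected by numPeriodsOK ('.' needs a following digit)
	}
	for _, src := range cases {
		var v map[string]interface{}
		_, err := toml.Decode(src, &v)
		fmt.Printf("%-14q err: %v\n", src, err)
	}
}
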
+
+// Set the current context of the parser, where the context is either a hash or
+// an array of hashes, depending on the value of the `array` parameter.
+//
+// Establishing the context also makes sure that the key isn't a duplicate, and
+// will create implicit hashes automatically.
+func (p *parser) addContext(key Key, array bool) {
+ var ok bool
+
+ // Always start at the top level and drill down for our context.
+ hashContext := p.mapping
+ keyContext := make(Key, 0)
+
+ // We only need implicit hashes for key[0:-1]
+ for _, k := range key[0 : len(key)-1] {
+ _, ok = hashContext[k]
+ keyContext = append(keyContext, k)
+
+ // No key? Make an implicit hash and move on.
+ if !ok {
+ p.addImplicit(keyContext)
+ hashContext[k] = make(map[string]interface{})
+ }
+
+ // If the hash context is actually an array of tables, then set
+ // the hash context to the last element in that array.
+ //
+ // Otherwise, it better be a table, since this MUST be a key group (by
+ // virtue of it not being the last element in a key).
+ switch t := hashContext[k].(type) {
+ case []map[string]interface{}:
+ hashContext = t[len(t)-1]
+ case map[string]interface{}:
+ hashContext = t
+ default:
+ p.panicf("Key '%s' was already created as a hash.", keyContext)
+ }
+ }
+
+ p.context = keyContext
+ if array {
+ // If this is the first element for this array, then allocate a new
+ // list of tables for it.
+ k := key[len(key)-1]
+ if _, ok := hashContext[k]; !ok {
+ hashContext[k] = make([]map[string]interface{}, 0, 4)
+ }
+
+ // Add a new table. But make sure the key hasn't already been used
+ // for something else.
+ if hash, ok := hashContext[k].([]map[string]interface{}); ok {
+ hashContext[k] = append(hash, make(map[string]interface{}))
+ } else {
+ p.panicf("Key '%s' was already created and cannot be used as an array.", keyContext)
+ }
+ } else {
+ p.setValue(key[len(key)-1], make(map[string]interface{}))
+ }
+ p.context = append(p.context, key[len(key)-1])
+}
+
+// set calls setValue and setType.
+func (p *parser) set(key string, val interface{}, typ tomlType) {
+ p.setValue(p.currentKey, val)
+ p.setType(p.currentKey, typ)
+}
+
+// setValue sets the given key to the given value in the current context.
+// It will make sure that the key hasn't already been defined, accounting for
+// implicit key groups.
+func (p *parser) setValue(key string, value interface{}) {
+ var (
+ tmpHash interface{}
+ ok bool
+ hash = p.mapping
+ keyContext Key
+ )
+ for _, k := range p.context {
+ keyContext = append(keyContext, k)
+ if tmpHash, ok = hash[k]; !ok {
+ p.bug("Context for key '%s' has not been established.", keyContext)
+ }
+ switch t := tmpHash.(type) {
+ case []map[string]interface{}:
+ // The context is an array of tables. Pick the most recent table
+ // defined as the current hash.
+ hash = t[len(t)-1]
+ case map[string]interface{}:
+ hash = t
+ default:
+ p.panicf("Key '%s' has already been defined.", keyContext)
+ }
+ }
+ keyContext = append(keyContext, key)
+
+ if _, ok := hash[key]; ok {
+ // Normally redefining keys isn't allowed, but the key could have been
+ // defined implicitly and it's allowed to be redefined concretely. (See
+ // the `valid/implicit-and-explicit-after.toml` in toml-test)
+ //
+ // But we have to make sure to stop marking it as an implicit. (So that
+ // another redefinition provokes an error.)
+ //
+ // Note that since it has already been defined (as a hash), we don't
+ // want to overwrite it. So our business is done.
+ if p.isArray(keyContext) {
+ p.removeImplicit(keyContext)
+ hash[key] = value
+ return
+ }
+ if p.isImplicit(keyContext) {
+ p.removeImplicit(keyContext)
+ return
+ }
+
+ // Otherwise, we have a concrete key trying to override a previous
+ // key, which is *always* wrong.
+ p.panicf("Key '%s' has already been defined.", keyContext)
+ }
+
+ hash[key] = value
+}
+
+// setType sets the type of a particular value at a given key.
+// It should be called immediately AFTER setValue.
+//
+// Note that if `key` is empty, then the type given will be applied to the
+// current context (which is either a table or an array of tables).
+func (p *parser) setType(key string, typ tomlType) {
+ keyContext := make(Key, 0, len(p.context)+1)
+ for _, k := range p.context {
+ keyContext = append(keyContext, k)
+ }
+ if len(key) > 0 { // allow type setting for hashes
+ keyContext = append(keyContext, key)
+ }
+ p.types[keyContext.String()] = typ
+}
+
+// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and
+// "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly).
+func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = true }
+func (p *parser) removeImplicit(key Key) { p.implicits[key.String()] = false }
+func (p *parser) isImplicit(key Key) bool { return p.implicits[key.String()] }
+func (p *parser) isArray(key Key) bool { return p.types[key.String()] == tomlArray }
+func (p *parser) addImplicitContext(key Key) {
+ p.addImplicit(key)
+ p.addContext(key, false)
+}
+
+// current returns the full key name of the current context.
+func (p *parser) current() string {
+ if len(p.currentKey) == 0 {
+ return p.context.String()
+ }
+ if len(p.context) == 0 {
+ return p.currentKey
+ }
+ return fmt.Sprintf("%s.%s", p.context, p.currentKey)
+}
+
+func stripFirstNewline(s string) string {
+ if len(s) > 0 && s[0] == '\n' {
+ return s[1:]
+ }
+ if len(s) > 1 && s[0] == '\r' && s[1] == '\n' {
+ return s[2:]
+ }
+ return s
+}
+
+// Remove newlines inside triple-quoted strings if a line ends with "\".
+func stripEscapedNewlines(s string) string {
+ split := strings.Split(s, "\n")
+ if len(split) < 1 {
+ return s
+ }
+
+ escNL := false // Keep track of whether the last non-blank line was escaped.
+ for i, line := range split {
+ line = strings.TrimRight(line, " \t\r")
+
+ if len(line) == 0 || line[len(line)-1] != '\\' {
+ split[i] = strings.TrimRight(split[i], "\r")
+ if !escNL && i != len(split)-1 {
+ split[i] += "\n"
+ }
+ continue
+ }
+
+ escBS := true
+ for j := len(line) - 1; j >= 0 && line[j] == '\\'; j-- {
+ escBS = !escBS
+ }
+ if escNL {
+ line = strings.TrimLeft(line, " \t\r")
+ }
+ escNL = !escBS
+
+ if escBS {
+ split[i] += "\n"
+ continue
+ }
+
+ split[i] = line[:len(line)-1] // Remove \
+ if len(split)-1 > i {
+ split[i+1] = strings.TrimLeft(split[i+1], " \t\r")
+ }
+ }
+ return strings.Join(split, "")
+}
+
+func (p *parser) replaceEscapes(str string) string {
+ var replaced []rune
+ s := []byte(str)
+ r := 0
+ for r < len(s) {
+ if s[r] != '\\' {
+ c, size := utf8.DecodeRune(s[r:])
+ r += size
+ replaced = append(replaced, c)
+ continue
+ }
+ r += 1
+ if r >= len(s) {
+ p.bug("Escape sequence at end of string.")
+ return ""
+ }
+ switch s[r] {
+ default:
+ p.bug("Expected valid escape code after \\, but got %q.", s[r])
+ return ""
+ case ' ', '\t':
+ p.panicf("invalid escape: '\\%c'", s[r])
+ return ""
+ case 'b':
+ replaced = append(replaced, rune(0x0008))
+ r += 1
+ case 't':
+ replaced = append(replaced, rune(0x0009))
+ r += 1
+ case 'n':
+ replaced = append(replaced, rune(0x000A))
+ r += 1
+ case 'f':
+ replaced = append(replaced, rune(0x000C))
+ r += 1
+ case 'r':
+ replaced = append(replaced, rune(0x000D))
+ r += 1
+ case '"':
+ replaced = append(replaced, rune(0x0022))
+ r += 1
+ case '\\':
+ replaced = append(replaced, rune(0x005C))
+ r += 1
+ case 'u':
+ // At this point, we know we have a Unicode escape of the form
+ // `uXXXX` at [r, r+5). (Because the lexer guarantees this
+ // for us.)
+ escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
+ replaced = append(replaced, escaped)
+ r += 5
+ case 'U':
+ // At this point, we know we have a Unicode escape of the form
+ // `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
+ // for us.)
+ escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
+ replaced = append(replaced, escaped)
+ r += 9
+ }
+ }
+ return string(replaced)
+}
+
+func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
+ s := string(bs)
+ hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
+ if err != nil {
+ p.bug("Could not parse '%s' as a hexadecimal number, but the "+
+ "lexer claims it's OK: %s", s, err)
+ }
+ if !utf8.ValidRune(rune(hex)) {
+ p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
+ }
+ return rune(hex)
+}
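
parse.go only builds the intermediate mapping and type tables; callers normally reach it through the package's exported decoding functions. A hedged end-to-end sketch, assuming the usual toml.Decode signature and case-insensitive field matching:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type config struct {
	Title  string
	Owner  struct{ Name string }
	Server []struct {
		Host string
		Port int
	}
}

func main() {
	const doc = `
title = "example"

[owner]
name = "gopher"

[[server]]
host = "10.0.0.1"
port = 8080

[[server]]
host = "10.0.0.2"
port = 8081
`
	var c config
	if _, err := toml.Decode(doc, &c); err != nil {
		panic(err)
	}
	// Tables, arrays of tables, and key/value pairs all land in the struct.
	fmt.Printf("%+v\n", c)
}
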
diff --git a/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/BurntSushi/toml/type_check.go
new file mode 100644
index 00000000..d56aa80f
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/type_check.go
@@ -0,0 +1,70 @@
+package toml
+
+// tomlType represents any Go type that corresponds to a TOML type.
+// While the first draft of the TOML spec has a simplistic type system that
+// probably doesn't need this level of sophistication, we seem to be militating
+// toward adding real composite types.
+type tomlType interface {
+ typeString() string
+}
+
+// typeEqual accepts any two types and returns true if they are equal.
+func typeEqual(t1, t2 tomlType) bool {
+ if t1 == nil || t2 == nil {
+ return false
+ }
+ return t1.typeString() == t2.typeString()
+}
+
+func typeIsHash(t tomlType) bool {
+ return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
+}
+
+type tomlBaseType string
+
+func (btype tomlBaseType) typeString() string {
+ return string(btype)
+}
+
+func (btype tomlBaseType) String() string {
+ return btype.typeString()
+}
+
+var (
+ tomlInteger tomlBaseType = "Integer"
+ tomlFloat tomlBaseType = "Float"
+ tomlDatetime tomlBaseType = "Datetime"
+ tomlString tomlBaseType = "String"
+ tomlBool tomlBaseType = "Bool"
+ tomlArray tomlBaseType = "Array"
+ tomlHash tomlBaseType = "Hash"
+ tomlArrayHash tomlBaseType = "ArrayHash"
+)
+
+// typeOfPrimitive returns a tomlType of any primitive value in TOML.
+// Primitive values are: Integer, Float, Datetime, String and Bool.
+//
+// Passing a lexer item other than the following will cause a BUG message
+// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
+func (p *parser) typeOfPrimitive(lexItem item) tomlType {
+ switch lexItem.typ {
+ case itemInteger:
+ return tomlInteger
+ case itemFloat:
+ return tomlFloat
+ case itemDatetime:
+ return tomlDatetime
+ case itemString:
+ return tomlString
+ case itemMultilineString:
+ return tomlString
+ case itemRawString:
+ return tomlString
+ case itemRawMultilineString:
+ return tomlString
+ case itemBool:
+ return tomlBool
+ }
+ p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
+ panic("unreachable")
+}
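
These type names ("Integer", "Float", and so on) are what the package reports back to callers through MetaData. A small sketch, assuming MetaData.Type behaves as documented upstream:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v map[string]interface{}
	md, err := toml.Decode("answer = 42\npi = 3.14\nname = \"gopher\"", &v)
	if err != nil {
		panic(err)
	}
	// Prints the tomlBaseType strings defined above: Integer Float String
	fmt.Println(md.Type("answer"), md.Type("pi"), md.Type("name"))
}
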
diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go
new file mode 100644
index 00000000..608997c2
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/type_fields.go
@@ -0,0 +1,242 @@
+package toml
+
+// Struct field handling is adapted from code in encoding/json:
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the Go distribution.
+
+import (
+ "reflect"
+ "sort"
+ "sync"
+)
+
+// A field represents a single field found in a struct.
+type field struct {
+ name string // the name of the field (`toml` tag included)
+ tag bool // whether field has a `toml` tag
+ index []int // represents the depth of an anonymous field
+ typ reflect.Type // the type of the field
+}
+
+// byName sorts field by name, breaking ties with depth,
+// then breaking ties with "name came from toml tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+ if x[i].name != x[j].name {
+ return x[i].name < x[j].name
+ }
+ if len(x[i].index) != len(x[j].index) {
+ return len(x[i].index) < len(x[j].index)
+ }
+ if x[i].tag != x[j].tag {
+ return x[i].tag
+ }
+ return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts field by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+ for k, xik := range x[i].index {
+ if k >= len(x[j].index) {
+ return false
+ }
+ if xik != x[j].index[k] {
+ return xik < x[j].index[k]
+ }
+ }
+ return len(x[i].index) < len(x[j].index)
+}
+
+// typeFields returns a list of fields that TOML should recognize for the given
+// type. The algorithm is breadth-first search over the set of structs to
+// include - the top struct and then any reachable anonymous structs.
+func typeFields(t reflect.Type) []field {
+ // Anonymous fields to explore at the current level and the next.
+ current := []field{}
+ next := []field{{typ: t}}
+
+ // Count of queued names for current level and the next.
+ count := map[reflect.Type]int{}
+ nextCount := map[reflect.Type]int{}
+
+ // Types already visited at an earlier level.
+ visited := map[reflect.Type]bool{}
+
+ // Fields found.
+ var fields []field
+
+ for len(next) > 0 {
+ current, next = next, current[:0]
+ count, nextCount = nextCount, map[reflect.Type]int{}
+
+ for _, f := range current {
+ if visited[f.typ] {
+ continue
+ }
+ visited[f.typ] = true
+
+ // Scan f.typ for fields to include.
+ for i := 0; i < f.typ.NumField(); i++ {
+ sf := f.typ.Field(i)
+ if sf.PkgPath != "" && !sf.Anonymous { // unexported
+ continue
+ }
+ opts := getOptions(sf.Tag)
+ if opts.skip {
+ continue
+ }
+ index := make([]int, len(f.index)+1)
+ copy(index, f.index)
+ index[len(f.index)] = i
+
+ ft := sf.Type
+ if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+ // Follow pointer.
+ ft = ft.Elem()
+ }
+
+ // Record found field and index sequence.
+ if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+ tagged := opts.name != ""
+ name := opts.name
+ if name == "" {
+ name = sf.Name
+ }
+ fields = append(fields, field{name, tagged, index, ft})
+ if count[f.typ] > 1 {
+ // If there were multiple instances, add a second,
+ // so that the annihilation code will see a duplicate.
+ // It only cares about the distinction between 1 or 2,
+ // so don't bother generating any more copies.
+ fields = append(fields, fields[len(fields)-1])
+ }
+ continue
+ }
+
+ // Record new anonymous struct to explore in next round.
+ nextCount[ft]++
+ if nextCount[ft] == 1 {
+ f := field{name: ft.Name(), index: index, typ: ft}
+ next = append(next, f)
+ }
+ }
+ }
+ }
+
+ sort.Sort(byName(fields))
+
+ // Delete all fields that are hidden by the Go rules for embedded fields,
+ // except that fields with TOML tags are promoted.
+
+ // The fields are sorted in primary order of name, secondary order
+ // of field index length. Loop over names; for each name, delete
+ // hidden fields by choosing the one dominant field that survives.
+ out := fields[:0]
+ for advance, i := 0, 0; i < len(fields); i += advance {
+ // One iteration per name.
+ // Find the sequence of fields with the name of this first field.
+ fi := fields[i]
+ name := fi.name
+ for advance = 1; i+advance < len(fields); advance++ {
+ fj := fields[i+advance]
+ if fj.name != name {
+ break
+ }
+ }
+ if advance == 1 { // Only one field with this name
+ out = append(out, fi)
+ continue
+ }
+ dominant, ok := dominantField(fields[i : i+advance])
+ if ok {
+ out = append(out, dominant)
+ }
+ }
+
+ fields = out
+ sort.Sort(byIndex(fields))
+
+ return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// TOML tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+ // The fields are sorted in increasing index-length order. The winner
+ // must therefore be one with the shortest index length. Drop all
+ // longer entries, which is easy: just truncate the slice.
+ length := len(fields[0].index)
+ tagged := -1 // Index of first tagged field.
+ for i, f := range fields {
+ if len(f.index) > length {
+ fields = fields[:i]
+ break
+ }
+ if f.tag {
+ if tagged >= 0 {
+ // Multiple tagged fields at the same level: conflict.
+ // Return no field.
+ return field{}, false
+ }
+ tagged = i
+ }
+ }
+ if tagged >= 0 {
+ return fields[tagged], true
+ }
+ // All remaining fields have the same length. If there's more than one,
+ // we have a conflict (two fields named "X" at the same level) and we
+ // return no field.
+ if len(fields) > 1 {
+ return field{}, false
+ }
+ return fields[0], true
+}
+
+var fieldCache struct {
+ sync.RWMutex
+ m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+ fieldCache.RLock()
+ f := fieldCache.m[t]
+ fieldCache.RUnlock()
+ if f != nil {
+ return f
+ }
+
+ // Compute fields without lock.
+ // Might duplicate effort but won't hold other computations back.
+ f = typeFields(t)
+ if f == nil {
+ f = []field{}
+ }
+
+ fieldCache.Lock()
+ if fieldCache.m == nil {
+ fieldCache.m = map[reflect.Type][]field{}
+ }
+ fieldCache.m[t] = f
+ fieldCache.Unlock()
+ return f
+}
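
In practice this machinery decides which struct field a TOML key binds to when tags and embedded structs compete. A hedged sketch (the Base and Item types are invented for illustration; it assumes the package's `toml` struct-tag handling, where `toml:"-"` skips a field):

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type Base struct {
	Name string // reachable through embedding, but shadowed below
}

type Item struct {
	Base
	Name   string `toml:"name"` // shallower (and tagged) field dominates Base.Name
	Secret string `toml:"-"`    // excluded from decoding entirely
}

func main() {
	var it Item
	if _, err := toml.Decode(`name = "widget"`, &it); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", it) // Item.Name is set; Base.Name stays empty
}
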
diff --git a/vendor/github.com/chzyer/readline/.gitignore b/vendor/github.com/chzyer/readline/.gitignore
new file mode 100644
index 00000000..a3062bea
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/.gitignore
@@ -0,0 +1 @@
+.vscode/*
diff --git a/vendor/github.com/chzyer/readline/.travis.yml b/vendor/github.com/chzyer/readline/.travis.yml
new file mode 100644
index 00000000..9c359554
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/.travis.yml
@@ -0,0 +1,8 @@
+language: go
+go:
+ - 1.x
+script:
+ - GOOS=windows go install github.com/chzyer/readline/example/...
+ - GOOS=linux go install github.com/chzyer/readline/example/...
+ - GOOS=darwin go install github.com/chzyer/readline/example/...
+ - go test -race -v
diff --git a/vendor/github.com/chzyer/readline/CHANGELOG.md b/vendor/github.com/chzyer/readline/CHANGELOG.md
new file mode 100644
index 00000000..14ff5be1
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/CHANGELOG.md
@@ -0,0 +1,58 @@
+# ChangeLog
+
+### 1.4 - 2016-07-25
+
+* [#60][60] Support dynamic autocompletion
+* Fix ANSI parser on Windows
+* Fix wrong column width in complete mode on Windows
+* Remove dependent package "golang.org/x/crypto/ssh/terminal"
+
+### 1.3 - 2016-05-09
+
+* [#38][38] add SetChildren for prefix completer interface
+* [#42][42] improve multiple lines compatibility
+* [#43][43] remove sub-package(runes) for gopkg compatibility
+* [#46][46] Auto complete with space prefixed line
+* [#48][48] support suspend process (ctrl+Z)
+* [#49][49] fix bug that check equals with previous command
+* [#53][53] Fix bug which causes integer divide by zero panicking when input buffer is empty
+
+### 1.2 - 2016-03-05
+
+* Add a demo for checking password strength [example/readline-pass-strength](https://github.com/chzyer/readline/blob/master/example/readline-pass-strength/readline-pass-strength.go), written by [@sahib](https://github.com/sahib)
+* [#23][23], support stdin remapping
+* [#27][27], add a `UniqueEditLine` to `Config`, which erases the editing line after the user submits it; usually used in IM.
+* Add a demo for multiline input [example/readline-multiline](https://github.com/chzyer/readline/blob/master/example/readline-multiline/readline-multiline.go) which can submit one SQL statement over multiple lines.
+* Works even when stdin/stdout is not a tty.
+* Add new, simple APIs for a single instance, shown [here](https://github.com/chzyer/readline/blob/master/std.go). History must be saved manually when using these APIs.
+* [#28][28], fix history not working as expected.
+* [#33][33], vim mode now supports `c`, `d`, `x (delete character)`, `r (replace character)`
+
+### 1.1 - 2015-11-20
+
+* [#12][12] Add support for key `<Delete>`/`<Home>`/`<End>`
+* Only enter raw mode as needed (when calling `Readline()`); the program will receive signals (e.g. Ctrl+C) when not interacting with `readline`.
+* Bugs fixed for `PrefixCompleter`
+* Pressing `Ctrl+D` on an empty line returns `io.EOF` as the error; pressing `Ctrl+C` at any time returns `ErrInterrupt` instead of `io.EOF`. This provides a shell-like user experience.
+* Customizable Interrupt/EOF prompt in `Config`
+* [#17][17] Change the atomic package usage to 32-bit functions so it runs on 32-bit ARM devices
+* Provide a new password input experience (`readline.ReadPasswordEx()`).
+
+### 1.0 - 2015-10-14
+
+* Initial public release.
+
+[12]: https://github.com/chzyer/readline/pull/12
+[17]: https://github.com/chzyer/readline/pull/17
+[23]: https://github.com/chzyer/readline/pull/23
+[27]: https://github.com/chzyer/readline/pull/27
+[28]: https://github.com/chzyer/readline/pull/28
+[33]: https://github.com/chzyer/readline/pull/33
+[38]: https://github.com/chzyer/readline/pull/38
+[42]: https://github.com/chzyer/readline/pull/42
+[43]: https://github.com/chzyer/readline/pull/43
+[46]: https://github.com/chzyer/readline/pull/46
+[48]: https://github.com/chzyer/readline/pull/48
+[49]: https://github.com/chzyer/readline/pull/49
+[53]: https://github.com/chzyer/readline/pull/53
+[60]: https://github.com/chzyer/readline/pull/60
diff --git a/vendor/github.com/chzyer/readline/LICENSE b/vendor/github.com/chzyer/readline/LICENSE
new file mode 100644
index 00000000..c9afab3d
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Chzyer
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/chzyer/readline/README.md b/vendor/github.com/chzyer/readline/README.md
new file mode 100644
index 00000000..fab974b7
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/README.md
@@ -0,0 +1,114 @@
+[Build Status](https://travis-ci.org/chzyer/readline)
+[Software License](LICENSE.md)
+[Releases](https://github.com/chzyer/readline/releases)
+[GoDoc](https://godoc.org/github.com/chzyer/readline)
+[Backers](#backers)
+[Sponsors](#sponsors)
+
+A powerful readline library in `Linux` `macOS` `Windows` `Solaris`
+
+## Guide
+
+* [Demo](example/readline-demo/readline-demo.go)
+* [Shortcut](doc/shortcut.md)
+
+## Repos using readline
+
+[cockroachdb/cockroach](https://github.com/cockroachdb/cockroach)
+[robertkrimen/otto](https://github.com/robertkrimen/otto)
+[remind101/empire](https://github.com/remind101/empire)
+[mehrdadrad/mylg](https://github.com/mehrdadrad/mylg)
+[knq/usql](https://github.com/knq/usql)
+[youtube/doorman](https://github.com/youtube/doorman)
+[bom-d-van/harp](https://github.com/bom-d-van/harp)
+[abiosoft/ishell](https://github.com/abiosoft/ishell)
+[Netflix/hal-9001](https://github.com/Netflix/hal-9001)
+[docker/go-p9p](https://github.com/docker/go-p9p)
+
+
+## Feedback
+
+If you have any questions, please submit a GitHub issue; pull requests are welcome :)
+
+* [https://twitter.com/chzyer](https://twitter.com/chzyer)
+* [http://weibo.com/2145262190](http://weibo.com/2145262190)
+
+
+## Backers
+
+Love Readline? Help me keep it alive by donating funds to cover project expenses!
+[[Become a backer](https://opencollective.com/readline#backer)]
+
+## Sponsors
+
+Become a sponsor and get your logo here on our Github page. [[Become a sponsor](https://opencollective.com/readline#sponsor)]
+
diff --git a/vendor/github.com/chzyer/readline/ansi_windows.go b/vendor/github.com/chzyer/readline/ansi_windows.go
new file mode 100644
index 00000000..63b908c1
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/ansi_windows.go
@@ -0,0 +1,249 @@
+// +build windows
+
+package readline
+
+import (
+ "bufio"
+ "io"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode/utf8"
+ "unsafe"
+)
+
+const (
+ _ = uint16(0)
+ COLOR_FBLUE = 0x0001
+ COLOR_FGREEN = 0x0002
+ COLOR_FRED = 0x0004
+ COLOR_FINTENSITY = 0x0008
+
+ COLOR_BBLUE = 0x0010
+ COLOR_BGREEN = 0x0020
+ COLOR_BRED = 0x0040
+ COLOR_BINTENSITY = 0x0080
+
+ COMMON_LVB_UNDERSCORE = 0x8000
+ COMMON_LVB_BOLD = 0x0007
+)
+
+var ColorTableFg = []word{
+ 0, // 30: Black
+ COLOR_FRED, // 31: Red
+ COLOR_FGREEN, // 32: Green
+ COLOR_FRED | COLOR_FGREEN, // 33: Yellow
+ COLOR_FBLUE, // 34: Blue
+ COLOR_FRED | COLOR_FBLUE, // 35: Magenta
+ COLOR_FGREEN | COLOR_FBLUE, // 36: Cyan
+ COLOR_FRED | COLOR_FBLUE | COLOR_FGREEN, // 37: White
+}
+
+var ColorTableBg = []word{
+ 0, // 40: Black
+ COLOR_BRED, // 41: Red
+ COLOR_BGREEN, // 42: Green
+ COLOR_BRED | COLOR_BGREEN, // 43: Yellow
+ COLOR_BBLUE, // 44: Blue
+ COLOR_BRED | COLOR_BBLUE, // 45: Magenta
+ COLOR_BGREEN | COLOR_BBLUE, // 46: Cyan
+ COLOR_BRED | COLOR_BBLUE | COLOR_BGREEN, // 47: White
+}
+
+type ANSIWriter struct {
+ target io.Writer
+ wg sync.WaitGroup
+ ctx *ANSIWriterCtx
+ sync.Mutex
+}
+
+func NewANSIWriter(w io.Writer) *ANSIWriter {
+ a := &ANSIWriter{
+ target: w,
+ ctx: NewANSIWriterCtx(w),
+ }
+ return a
+}
+
+func (a *ANSIWriter) Close() error {
+ a.wg.Wait()
+ return nil
+}
+
+type ANSIWriterCtx struct {
+ isEsc bool
+ isEscSeq bool
+ arg []string
+ target *bufio.Writer
+ wantFlush bool
+}
+
+func NewANSIWriterCtx(target io.Writer) *ANSIWriterCtx {
+ return &ANSIWriterCtx{
+ target: bufio.NewWriter(target),
+ }
+}
+
+func (a *ANSIWriterCtx) Flush() {
+ a.target.Flush()
+}
+
+func (a *ANSIWriterCtx) process(r rune) bool {
+ if a.wantFlush {
+ if r == 0 || r == CharEsc {
+ a.wantFlush = false
+ a.target.Flush()
+ }
+ }
+ if a.isEscSeq {
+ a.isEscSeq = a.ioloopEscSeq(a.target, r, &a.arg)
+ return true
+ }
+
+ switch r {
+ case CharEsc:
+ a.isEsc = true
+ case '[':
+ if a.isEsc {
+ a.arg = nil
+ a.isEscSeq = true
+ a.isEsc = false
+ break
+ }
+ fallthrough
+ default:
+ a.target.WriteRune(r)
+ a.wantFlush = true
+ }
+ return true
+}
+
+func (a *ANSIWriterCtx) ioloopEscSeq(w *bufio.Writer, r rune, argptr *[]string) bool {
+ arg := *argptr
+ var err error
+
+ if r >= 'A' && r <= 'D' {
+ count := short(GetInt(arg, 1))
+ info, err := GetConsoleScreenBufferInfo()
+ if err != nil {
+ return false
+ }
+ switch r {
+ case 'A': // up
+ info.dwCursorPosition.y -= count
+ case 'B': // down
+ info.dwCursorPosition.y += count
+ case 'C': // right
+ info.dwCursorPosition.x += count
+ case 'D': // left
+ info.dwCursorPosition.x -= count
+ }
+ SetConsoleCursorPosition(&info.dwCursorPosition)
+ return false
+ }
+
+ switch r {
+ case 'J':
+ killLines()
+ case 'K':
+ eraseLine()
+ case 'm':
+ color := word(0)
+ for _, item := range arg {
+ var c int
+ c, err = strconv.Atoi(item)
+ if err != nil {
+ w.WriteString("[" + strings.Join(arg, ";") + "m")
+ break
+ }
+ if c >= 30 && c < 40 {
+ color ^= COLOR_FINTENSITY
+ color |= ColorTableFg[c-30]
+ } else if c >= 40 && c < 50 {
+ color ^= COLOR_BINTENSITY
+ color |= ColorTableBg[c-40]
+ } else if c == 4 {
+ color |= COMMON_LVB_UNDERSCORE | ColorTableFg[7]
+ } else if c == 1 {
+ color |= COMMON_LVB_BOLD | COLOR_FINTENSITY
+ } else { // unknown code treat as reset
+ color = ColorTableFg[7]
+ }
+ }
+ if err != nil {
+ break
+ }
+ kernel.SetConsoleTextAttribute(stdout, uintptr(color))
+ case '\007': // set title
+ case ';':
+ if len(arg) == 0 || arg[len(arg)-1] != "" {
+ arg = append(arg, "")
+ *argptr = arg
+ }
+ return true
+ default:
+ if len(arg) == 0 {
+ arg = append(arg, "")
+ }
+ arg[len(arg)-1] += string(r)
+ *argptr = arg
+ return true
+ }
+ *argptr = nil
+ return false
+}
+
+func (a *ANSIWriter) Write(b []byte) (int, error) {
+ a.Lock()
+ defer a.Unlock()
+
+ off := 0
+ for len(b) > off {
+ r, size := utf8.DecodeRune(b[off:])
+ if size == 0 {
+ return off, io.ErrShortWrite
+ }
+ off += size
+ a.ctx.process(r)
+ }
+ a.ctx.Flush()
+ return off, nil
+}
+
+func killLines() error {
+ sbi, err := GetConsoleScreenBufferInfo()
+ if err != nil {
+ return err
+ }
+
+ size := (sbi.dwCursorPosition.y - sbi.dwSize.y) * sbi.dwSize.x
+ size += sbi.dwCursorPosition.x
+
+ var written int
+ kernel.FillConsoleOutputAttribute(stdout, uintptr(ColorTableFg[7]),
+ uintptr(size),
+ sbi.dwCursorPosition.ptr(),
+ uintptr(unsafe.Pointer(&written)),
+ )
+ return kernel.FillConsoleOutputCharacterW(stdout, uintptr(' '),
+ uintptr(size),
+ sbi.dwCursorPosition.ptr(),
+ uintptr(unsafe.Pointer(&written)),
+ )
+}
+
+func eraseLine() error {
+ sbi, err := GetConsoleScreenBufferInfo()
+ if err != nil {
+ return err
+ }
+
+ size := sbi.dwSize.x
+ sbi.dwCursorPosition.x = 0
+ var written int
+ return kernel.FillConsoleOutputCharacterW(stdout, uintptr(' '),
+ uintptr(size),
+ sbi.dwCursorPosition.ptr(),
+ uintptr(unsafe.Pointer(&written)),
+ )
+}
diff --git a/vendor/github.com/chzyer/readline/complete.go b/vendor/github.com/chzyer/readline/complete.go
new file mode 100644
index 00000000..c08c9941
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/complete.go
@@ -0,0 +1,285 @@
+package readline
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+)
+
+type AutoCompleter interface {
+ // Readline will pass the whole line and the current offset to it.
+ // The completer needs to return all candidates and how many characters they share with the line.
+ // Example:
+ // [go, git, git-shell, grep]
+ // Do("g", 1) => ["o", "it", "it-shell", "rep"], 1
+ // Do("gi", 2) => ["t", "t-shell"], 2
+ // Do("git", 3) => ["", "-shell"], 3
+ Do(line []rune, pos int) (newLine [][]rune, length int)
+}
+
+type TabCompleter struct{}
+
+func (t *TabCompleter) Do([]rune, int) ([][]rune, int) {
+ return [][]rune{[]rune("\t")}, 0
+}
+
+type opCompleter struct {
+ w io.Writer
+ op *Operation
+ width int
+
+ inCompleteMode bool
+ inSelectMode bool
+ candidate [][]rune
+ candidateSource []rune
+ candidateOff int
+ candidateChoise int
+ candidateColNum int
+}
+
+func newOpCompleter(w io.Writer, op *Operation, width int) *opCompleter {
+ return &opCompleter{
+ w: w,
+ op: op,
+ width: width,
+ }
+}
+
+func (o *opCompleter) doSelect() {
+ if len(o.candidate) == 1 {
+ o.op.buf.WriteRunes(o.candidate[0])
+ o.ExitCompleteMode(false)
+ return
+ }
+ o.nextCandidate(1)
+ o.CompleteRefresh()
+}
+
+func (o *opCompleter) nextCandidate(i int) {
+ o.candidateChoise += i
+ o.candidateChoise = o.candidateChoise % len(o.candidate)
+ if o.candidateChoise < 0 {
+ o.candidateChoise = len(o.candidate) + o.candidateChoise
+ }
+}
+
+func (o *opCompleter) OnComplete() bool {
+ if o.width == 0 {
+ return false
+ }
+ if o.IsInCompleteSelectMode() {
+ o.doSelect()
+ return true
+ }
+
+ buf := o.op.buf
+ rs := buf.Runes()
+
+ if o.IsInCompleteMode() && o.candidateSource != nil && runes.Equal(rs, o.candidateSource) {
+ o.EnterCompleteSelectMode()
+ o.doSelect()
+ return true
+ }
+
+ o.ExitCompleteSelectMode()
+ o.candidateSource = rs
+ newLines, offset := o.op.cfg.AutoComplete.Do(rs, buf.idx)
+ if len(newLines) == 0 {
+ o.ExitCompleteMode(false)
+ return true
+ }
+
+ // only Aggregate candidates in non-complete mode
+ if !o.IsInCompleteMode() {
+ if len(newLines) == 1 {
+ buf.WriteRunes(newLines[0])
+ o.ExitCompleteMode(false)
+ return true
+ }
+
+ same, size := runes.Aggregate(newLines)
+ if size > 0 {
+ buf.WriteRunes(same)
+ o.ExitCompleteMode(false)
+ return true
+ }
+ }
+
+ o.EnterCompleteMode(offset, newLines)
+ return true
+}
+
+func (o *opCompleter) IsInCompleteSelectMode() bool {
+ return o.inSelectMode
+}
+
+func (o *opCompleter) IsInCompleteMode() bool {
+ return o.inCompleteMode
+}
+
+func (o *opCompleter) HandleCompleteSelect(r rune) bool {
+ next := true
+ switch r {
+ case CharEnter, CharCtrlJ:
+ next = false
+ o.op.buf.WriteRunes(o.op.candidate[o.op.candidateChoise])
+ o.ExitCompleteMode(false)
+ case CharLineStart:
+ num := o.candidateChoise % o.candidateColNum
+ o.nextCandidate(-num)
+ case CharLineEnd:
+ num := o.candidateColNum - o.candidateChoise%o.candidateColNum - 1
+ o.candidateChoise += num
+ if o.candidateChoise >= len(o.candidate) {
+ o.candidateChoise = len(o.candidate) - 1
+ }
+ case CharBackspace:
+ o.ExitCompleteSelectMode()
+ next = false
+ case CharTab, CharForward:
+ o.doSelect()
+ case CharBell, CharInterrupt:
+ o.ExitCompleteMode(true)
+ next = false
+ case CharNext:
+ tmpChoise := o.candidateChoise + o.candidateColNum
+ if tmpChoise >= o.getMatrixSize() {
+ tmpChoise -= o.getMatrixSize()
+ } else if tmpChoise >= len(o.candidate) {
+ tmpChoise += o.candidateColNum
+ tmpChoise -= o.getMatrixSize()
+ }
+ o.candidateChoise = tmpChoise
+ case CharBackward:
+ o.nextCandidate(-1)
+ case CharPrev:
+ tmpChoise := o.candidateChoise - o.candidateColNum
+ if tmpChoise < 0 {
+ tmpChoise += o.getMatrixSize()
+ if tmpChoise >= len(o.candidate) {
+ tmpChoise -= o.candidateColNum
+ }
+ }
+ o.candidateChoise = tmpChoise
+ default:
+ next = false
+ o.ExitCompleteSelectMode()
+ }
+ if next {
+ o.CompleteRefresh()
+ return true
+ }
+ return false
+}
+
+func (o *opCompleter) getMatrixSize() int {
+ line := len(o.candidate) / o.candidateColNum
+ if len(o.candidate)%o.candidateColNum != 0 {
+ line++
+ }
+ return line * o.candidateColNum
+}
+
+func (o *opCompleter) OnWidthChange(newWidth int) {
+ o.width = newWidth
+}
+
+func (o *opCompleter) CompleteRefresh() {
+ if !o.inCompleteMode {
+ return
+ }
+ lineCnt := o.op.buf.CursorLineCount()
+ colWidth := 0
+ for _, c := range o.candidate {
+ w := runes.WidthAll(c)
+ if w > colWidth {
+ colWidth = w
+ }
+ }
+ colWidth += o.candidateOff + 1
+ same := o.op.buf.RuneSlice(-o.candidateOff)
+
+ // -1 to avoid reaching the end of the line
+ width := o.width - 1
+ colNum := width / colWidth
+ if colNum != 0 {
+ colWidth += (width - (colWidth * colNum)) / colNum
+ }
+
+ o.candidateColNum = colNum
+ buf := bufio.NewWriter(o.w)
+ buf.Write(bytes.Repeat([]byte("\n"), lineCnt))
+
+ colIdx := 0
+ lines := 1
+ buf.WriteString("\033[J")
+ for idx, c := range o.candidate {
+ inSelect := idx == o.candidateChoise && o.IsInCompleteSelectMode()
+ if inSelect {
+ buf.WriteString("\033[30;47m")
+ }
+ buf.WriteString(string(same))
+ buf.WriteString(string(c))
+ buf.Write(bytes.Repeat([]byte(" "), colWidth-runes.WidthAll(c)-runes.WidthAll(same)))
+
+ if inSelect {
+ buf.WriteString("\033[0m")
+ }
+
+ colIdx++
+ if colIdx == colNum {
+ buf.WriteString("\n")
+ lines++
+ colIdx = 0
+ }
+ }
+
+ // move back
+ fmt.Fprintf(buf, "\033[%dA\r", lineCnt-1+lines)
+ fmt.Fprintf(buf, "\033[%dC", o.op.buf.idx+o.op.buf.PromptLen())
+ buf.Flush()
+}
+
+func (o *opCompleter) aggCandidate(candidate [][]rune) int {
+ offset := 0
+ for i := 0; i < len(candidate[0]); i++ {
+ for j := 0; j < len(candidate)-1; j++ {
+ if i > len(candidate[j]) {
+ goto aggregate
+ }
+ if candidate[j][i] != candidate[j+1][i] {
+ goto aggregate
+ }
+ }
+ offset = i
+ }
+aggregate:
+ return offset
+}
+
+func (o *opCompleter) EnterCompleteSelectMode() {
+ o.inSelectMode = true
+ o.candidateChoise = -1
+ o.CompleteRefresh()
+}
+
+func (o *opCompleter) EnterCompleteMode(offset int, candidate [][]rune) {
+ o.inCompleteMode = true
+ o.candidate = candidate
+ o.candidateOff = offset
+ o.CompleteRefresh()
+}
+
+func (o *opCompleter) ExitCompleteSelectMode() {
+ o.inSelectMode = false
+ o.candidate = nil
+ o.candidateChoise = -1
+ o.candidateOff = -1
+ o.candidateSource = nil
+}
+
+func (o *opCompleter) ExitCompleteMode(revent bool) {
+ o.inCompleteMode = false
+ o.ExitCompleteSelectMode()
+}
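
AutoCompleter is the extension point the completion code above drives. A rough sketch of a custom implementation wired into a readline instance (the staticCompleter type is invented; readline.NewEx and Config.AutoComplete are the package's exported entry points):

package main

import (
	"strings"

	"github.com/chzyer/readline"
)

// staticCompleter completes from a fixed word list: it returns the remaining
// suffix of every word that starts with what has been typed so far.
type staticCompleter struct{ words []string }

func (s *staticCompleter) Do(line []rune, pos int) ([][]rune, int) {
	prefix := string(line[:pos])
	var out [][]rune
	for _, w := range s.words {
		if strings.HasPrefix(w, prefix) {
			out = append(out, []rune(w[len(prefix):]))
		}
	}
	return out, pos // number of runes shared with the line, per the interface comment
}

func main() {
	rl, err := readline.NewEx(&readline.Config{
		Prompt:       "> ",
		AutoComplete: &staticCompleter{words: []string{"go", "git", "grep"}},
	})
	if err != nil {
		panic(err)
	}
	defer rl.Close()

	for {
		if _, err := rl.Readline(); err != nil { // io.EOF or readline.ErrInterrupt
			break
		}
	}
}
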
diff --git a/vendor/github.com/chzyer/readline/complete_helper.go b/vendor/github.com/chzyer/readline/complete_helper.go
new file mode 100644
index 00000000..58d72487
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/complete_helper.go
@@ -0,0 +1,165 @@
+package readline
+
+import (
+ "bytes"
+ "strings"
+)
+
+// Caller type for dynamic completion
+type DynamicCompleteFunc func(string) []string
+
+type PrefixCompleterInterface interface {
+ Print(prefix string, level int, buf *bytes.Buffer)
+ Do(line []rune, pos int) (newLine [][]rune, length int)
+ GetName() []rune
+ GetChildren() []PrefixCompleterInterface
+ SetChildren(children []PrefixCompleterInterface)
+}
+
+type DynamicPrefixCompleterInterface interface {
+ PrefixCompleterInterface
+ IsDynamic() bool
+ GetDynamicNames(line []rune) [][]rune
+}
+
+type PrefixCompleter struct {
+ Name []rune
+ Dynamic bool
+ Callback DynamicCompleteFunc
+ Children []PrefixCompleterInterface
+}
+
+func (p *PrefixCompleter) Tree(prefix string) string {
+ buf := bytes.NewBuffer(nil)
+ p.Print(prefix, 0, buf)
+ return buf.String()
+}
+
+func Print(p PrefixCompleterInterface, prefix string, level int, buf *bytes.Buffer) {
+ if strings.TrimSpace(string(p.GetName())) != "" {
+ buf.WriteString(prefix)
+ if level > 0 {
+ buf.WriteString("├")
+ buf.WriteString(strings.Repeat("─", (level*4)-2))
+ buf.WriteString(" ")
+ }
+ buf.WriteString(string(p.GetName()) + "\n")
+ level++
+ }
+ for _, ch := range p.GetChildren() {
+ ch.Print(prefix, level, buf)
+ }
+}
+
+func (p *PrefixCompleter) Print(prefix string, level int, buf *bytes.Buffer) {
+ Print(p, prefix, level, buf)
+}
+
+func (p *PrefixCompleter) IsDynamic() bool {
+ return p.Dynamic
+}
+
+func (p *PrefixCompleter) GetName() []rune {
+ return p.Name
+}
+
+func (p *PrefixCompleter) GetDynamicNames(line []rune) [][]rune {
+ var names = [][]rune{}
+ for _, name := range p.Callback(string(line)) {
+ names = append(names, []rune(name+" "))
+ }
+ return names
+}
+
+func (p *PrefixCompleter) GetChildren() []PrefixCompleterInterface {
+ return p.Children
+}
+
+func (p *PrefixCompleter) SetChildren(children []PrefixCompleterInterface) {
+ p.Children = children
+}
+
+func NewPrefixCompleter(pc ...PrefixCompleterInterface) *PrefixCompleter {
+ return PcItem("", pc...)
+}
+
+func PcItem(name string, pc ...PrefixCompleterInterface) *PrefixCompleter {
+ name += " "
+ return &PrefixCompleter{
+ Name: []rune(name),
+ Dynamic: false,
+ Children: pc,
+ }
+}
+
+func PcItemDynamic(callback DynamicCompleteFunc, pc ...PrefixCompleterInterface) *PrefixCompleter {
+ return &PrefixCompleter{
+ Callback: callback,
+ Dynamic: true,
+ Children: pc,
+ }
+}
+
+func (p *PrefixCompleter) Do(line []rune, pos int) (newLine [][]rune, offset int) {
+ return doInternal(p, line, pos, line)
+}
+
+func Do(p PrefixCompleterInterface, line []rune, pos int) (newLine [][]rune, offset int) {
+ return doInternal(p, line, pos, line)
+}
+
+func doInternal(p PrefixCompleterInterface, line []rune, pos int, origLine []rune) (newLine [][]rune, offset int) {
+ line = runes.TrimSpaceLeft(line[:pos])
+ goNext := false
+ var lineCompleter PrefixCompleterInterface
+ for _, child := range p.GetChildren() {
+ childNames := make([][]rune, 1)
+
+ childDynamic, ok := child.(DynamicPrefixCompleterInterface)
+ if ok && childDynamic.IsDynamic() {
+ childNames = childDynamic.GetDynamicNames(origLine)
+ } else {
+ childNames[0] = child.GetName()
+ }
+
+ for _, childName := range childNames {
+ if len(line) >= len(childName) {
+ if runes.HasPrefix(line, childName) {
+ if len(line) == len(childName) {
+ newLine = append(newLine, []rune{' '})
+ } else {
+ newLine = append(newLine, childName)
+ }
+ offset = len(childName)
+ lineCompleter = child
+ goNext = true
+ }
+ } else {
+ if runes.HasPrefix(childName, line) {
+ newLine = append(newLine, childName[len(line):])
+ offset = len(line)
+ lineCompleter = child
+ }
+ }
+ }
+ }
+
+ if len(newLine) != 1 {
+ return
+ }
+
+ tmpLine := make([]rune, 0, len(line))
+ for i := offset; i < len(line); i++ {
+ if line[i] == ' ' {
+ continue
+ }
+
+ tmpLine = append(tmpLine, line[i:]...)
+ return doInternal(lineCompleter, tmpLine, len(tmpLine), origLine)
+ }
+
+ if goNext {
+ return doInternal(lineCompleter, nil, 0, origLine)
+ }
+ return
+}
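
A typical way to wire PrefixCompleter, including a dynamic node, into a readline instance; the command names here are only illustrative:

package main

import "github.com/chzyer/readline"

func main() {
	completer := readline.NewPrefixCompleter(
		readline.PcItem("mode",
			readline.PcItem("vi"),
			readline.PcItem("emacs"),
		),
		readline.PcItem("say",
			readline.PcItemDynamic(func(line string) []string {
				// Candidates could be computed from the current line; static here for brevity.
				return []string{"hello", "bye"}
			}),
		),
	)

	rl, err := readline.NewEx(&readline.Config{
		Prompt:       "> ",
		AutoComplete: completer,
	})
	if err != nil {
		panic(err)
	}
	defer rl.Close()

	for {
		if _, err := rl.Readline(); err != nil {
			break
		}
	}
}
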
diff --git a/vendor/github.com/chzyer/readline/complete_segment.go b/vendor/github.com/chzyer/readline/complete_segment.go
new file mode 100644
index 00000000..5ceadd80
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/complete_segment.go
@@ -0,0 +1,82 @@
+package readline
+
+type SegmentCompleter interface {
+ // a
+ // |- a1
+ // |--- a11
+ // |- a2
+ // b
+ // input:
+ // DoTree([], 0) [a, b]
+ // DoTree([a], 1) [a]
+ // DoTree([a, ], 0) [a1, a2]
+ // DoTree([a, a], 1) [a1, a2]
+ // DoTree([a, a1], 2) [a1]
+ // DoTree([a, a1, ], 0) [a11]
+ // DoTree([a, a1, a], 1) [a11]
+ DoSegment([][]rune, int) [][]rune
+}
+
+type dumpSegmentCompleter struct {
+ f func([][]rune, int) [][]rune
+}
+
+func (d *dumpSegmentCompleter) DoSegment(segment [][]rune, n int) [][]rune {
+ return d.f(segment, n)
+}
+
+func SegmentFunc(f func([][]rune, int) [][]rune) AutoCompleter {
+ return &SegmentComplete{&dumpSegmentCompleter{f}}
+}
+
+func SegmentAutoComplete(completer SegmentCompleter) *SegmentComplete {
+ return &SegmentComplete{
+ SegmentCompleter: completer,
+ }
+}
+
+type SegmentComplete struct {
+ SegmentCompleter
+}
+
+func RetSegment(segments [][]rune, cands [][]rune, idx int) ([][]rune, int) {
+ ret := make([][]rune, 0, len(cands))
+ lastSegment := segments[len(segments)-1]
+ for _, cand := range cands {
+ if !runes.HasPrefix(cand, lastSegment) {
+ continue
+ }
+ ret = append(ret, cand[len(lastSegment):])
+ }
+ return ret, idx
+}
+
+func SplitSegment(line []rune, pos int) ([][]rune, int) {
+ segs := [][]rune{}
+ lastIdx := -1
+ line = line[:pos]
+ pos = 0
+ for idx, l := range line {
+ if l == ' ' {
+ pos = 0
+ segs = append(segs, line[lastIdx+1:idx])
+ lastIdx = idx
+ } else {
+ pos++
+ }
+ }
+ segs = append(segs, line[lastIdx+1:])
+ return segs, pos
+}
+
+func (c *SegmentComplete) Do(line []rune, pos int) (newLine [][]rune, offset int) {
+
+ segment, idx := SplitSegment(line, pos)
+
+ cands := c.DoSegment(segment, idx)
+ newLine, offset = RetSegment(segment, cands, idx)
+ for idx := range newLine {
+ newLine[idx] = append(newLine[idx], ' ')
+ }
+ return newLine, offset
+}
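SegmentFunc is the lower-level alternative to the prefix completer: the callback receives the line split into whitespace-separated segments plus the number of runes typed in the current segment, and returns full candidates for that segment (RetSegment then trims the shared prefix). A hedged sketch with made-up candidate lists:

    package main

    import "github.com/chzyer/readline"

    func main() {
    	// Candidates for the first and second word; purely illustrative data.
    	completer := readline.SegmentFunc(func(segments [][]rune, n int) [][]rune {
    		if len(segments) <= 1 {
    			return [][]rune{[]rune("show"), []rune("set")}
    		}
    		return [][]rune{[]rune("color"), []rune("columns")}
    	})

    	rl, err := readline.NewEx(&readline.Config{
    		Prompt:       "> ",
    		AutoComplete: completer,
    	})
    	if err != nil {
    		panic(err)
    	}
    	defer rl.Close()
    	rl.Readline()
    }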
diff --git a/vendor/github.com/chzyer/readline/history.go b/vendor/github.com/chzyer/readline/history.go
new file mode 100644
index 00000000..6b17c464
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/history.go
@@ -0,0 +1,330 @@
+package readline
+
+import (
+ "bufio"
+ "container/list"
+ "fmt"
+ "os"
+ "strings"
+ "sync"
+)
+
+type hisItem struct {
+ Source []rune
+ Version int64
+ Tmp []rune
+}
+
+func (h *hisItem) Clean() {
+ h.Source = nil
+ h.Tmp = nil
+}
+
+type opHistory struct {
+ cfg *Config
+ history *list.List
+ historyVer int64
+ current *list.Element
+ fd *os.File
+ fdLock sync.Mutex
+ enable bool
+}
+
+func newOpHistory(cfg *Config) (o *opHistory) {
+ o = &opHistory{
+ cfg: cfg,
+ history: list.New(),
+ enable: true,
+ }
+ return o
+}
+
+func (o *opHistory) Reset() {
+ o.history = list.New()
+ o.current = nil
+}
+
+func (o *opHistory) IsHistoryClosed() bool {
+ o.fdLock.Lock()
+ defer o.fdLock.Unlock()
+ return o.fd.Fd() == ^(uintptr(0))
+}
+
+func (o *opHistory) Init() {
+ if o.IsHistoryClosed() {
+ o.initHistory()
+ }
+}
+
+func (o *opHistory) initHistory() {
+ if o.cfg.HistoryFile != "" {
+ o.historyUpdatePath(o.cfg.HistoryFile)
+ }
+}
+
+// called by initHistory once a HistoryFile is configured
+func (o *opHistory) historyUpdatePath(path string) {
+ o.fdLock.Lock()
+ defer o.fdLock.Unlock()
+ f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0666)
+ if err != nil {
+ return
+ }
+ o.fd = f
+ r := bufio.NewReader(o.fd)
+ total := 0
+ for ; ; total++ {
+ line, err := r.ReadString('\n')
+ if err != nil {
+ break
+ }
+ // ignore the empty line
+ line = strings.TrimSpace(line)
+ if len(line) == 0 {
+ continue
+ }
+ o.Push([]rune(line))
+ o.Compact()
+ }
+ if total > o.cfg.HistoryLimit {
+ o.rewriteLocked()
+ }
+ o.historyVer++
+ o.Push(nil)
+ return
+}
+
+func (o *opHistory) Compact() {
+ for o.history.Len() > o.cfg.HistoryLimit && o.history.Len() > 0 {
+ o.history.Remove(o.history.Front())
+ }
+}
+
+func (o *opHistory) Rewrite() {
+ o.fdLock.Lock()
+ defer o.fdLock.Unlock()
+ o.rewriteLocked()
+}
+
+func (o *opHistory) rewriteLocked() {
+ if o.cfg.HistoryFile == "" {
+ return
+ }
+
+ tmpFile := o.cfg.HistoryFile + ".tmp"
+ fd, err := os.OpenFile(tmpFile, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_APPEND, 0666)
+ if err != nil {
+ return
+ }
+
+ buf := bufio.NewWriter(fd)
+ for elem := o.history.Front(); elem != nil; elem = elem.Next() {
+ buf.WriteString(string(elem.Value.(*hisItem).Source) + "\n")
+ }
+ buf.Flush()
+
+ // replace history file
+ if err = os.Rename(tmpFile, o.cfg.HistoryFile); err != nil {
+ fd.Close()
+ return
+ }
+
+ if o.fd != nil {
+ o.fd.Close()
+ }
+ // fd is write-only, which is all we need for appending new entries.
+ o.fd = fd
+}
+
+func (o *opHistory) Close() {
+ o.fdLock.Lock()
+ defer o.fdLock.Unlock()
+ if o.fd != nil {
+ o.fd.Close()
+ }
+}
+
+func (o *opHistory) FindBck(isNewSearch bool, rs []rune, start int) (int, *list.Element) {
+ for elem := o.current; elem != nil; elem = elem.Prev() {
+ item := o.showItem(elem.Value)
+ if isNewSearch {
+ start += len(rs)
+ }
+ if elem == o.current {
+ if len(item) >= start {
+ item = item[:start]
+ }
+ }
+ idx := runes.IndexAllBckEx(item, rs, o.cfg.HistorySearchFold)
+ if idx < 0 {
+ continue
+ }
+ return idx, elem
+ }
+ return -1, nil
+}
+
+func (o *opHistory) FindFwd(isNewSearch bool, rs []rune, start int) (int, *list.Element) {
+ for elem := o.current; elem != nil; elem = elem.Next() {
+ item := o.showItem(elem.Value)
+ if isNewSearch {
+ start -= len(rs)
+ if start < 0 {
+ start = 0
+ }
+ }
+ if elem == o.current {
+ if len(item)-1 >= start {
+ item = item[start:]
+ } else {
+ continue
+ }
+ }
+ idx := runes.IndexAllEx(item, rs, o.cfg.HistorySearchFold)
+ if idx < 0 {
+ continue
+ }
+ if elem == o.current {
+ idx += start
+ }
+ return idx, elem
+ }
+ return -1, nil
+}
+
+func (o *opHistory) showItem(obj interface{}) []rune {
+ item := obj.(*hisItem)
+ if item.Version == o.historyVer {
+ return item.Tmp
+ }
+ return item.Source
+}
+
+func (o *opHistory) Prev() []rune {
+ if o.current == nil {
+ return nil
+ }
+ current := o.current.Prev()
+ if current == nil {
+ return nil
+ }
+ o.current = current
+ return runes.Copy(o.showItem(current.Value))
+}
+
+func (o *opHistory) Next() ([]rune, bool) {
+ if o.current == nil {
+ return nil, false
+ }
+ current := o.current.Next()
+ if current == nil {
+ return nil, false
+ }
+
+ o.current = current
+ return runes.Copy(o.showItem(current.Value)), true
+}
+
+// Disable the current history
+func (o *opHistory) Disable() {
+ o.enable = false
+}
+
+// Enable the current history
+func (o *opHistory) Enable() {
+ o.enable = true
+}
+
+func (o *opHistory) debug() {
+ Debug("-------")
+ for item := o.history.Front(); item != nil; item = item.Next() {
+ Debug(fmt.Sprintf("%+v", item.Value))
+ }
+}
+
+// save history
+func (o *opHistory) New(current []rune) (err error) {
+
+ // history deactivated
+ if !o.enable {
+ return nil
+ }
+
+ current = runes.Copy(current)
+
+ // if the last command was reused without modification,
+ // just clean the latest history entry
+ if back := o.history.Back(); back != nil {
+ prev := back.Prev()
+ if prev != nil {
+ if runes.Equal(current, prev.Value.(*hisItem).Source) {
+ o.current = o.history.Back()
+ o.current.Value.(*hisItem).Clean()
+ o.historyVer++
+ return nil
+ }
+ }
+ }
+
+ if len(current) == 0 {
+ o.current = o.history.Back()
+ if o.current != nil {
+ o.current.Value.(*hisItem).Clean()
+ o.historyVer++
+ return nil
+ }
+ }
+
+ if o.current != o.history.Back() {
+ // move history item to current command
+ currentItem := o.current.Value.(*hisItem)
+ // set current to last item
+ o.current = o.history.Back()
+
+ current = runes.Copy(currentItem.Tmp)
+ }
+
+ // err can only be an IO error; just report it
+ err = o.Update(current, true)
+
+ // push a new one to commit current command
+ o.historyVer++
+ o.Push(nil)
+ return
+}
+
+func (o *opHistory) Revert() {
+ o.historyVer++
+ o.current = o.history.Back()
+}
+
+func (o *opHistory) Update(s []rune, commit bool) (err error) {
+ o.fdLock.Lock()
+ defer o.fdLock.Unlock()
+ s = runes.Copy(s)
+ if o.current == nil {
+ o.Push(s)
+ o.Compact()
+ return
+ }
+ r := o.current.Value.(*hisItem)
+ r.Version = o.historyVer
+ if commit {
+ r.Source = s
+ if o.fd != nil {
+ // just report the error
+ _, err = o.fd.Write([]byte(string(r.Source) + "\n"))
+ }
+ } else {
+ r.Tmp = append(r.Tmp[:0], s...)
+ }
+ o.current.Value = r
+ o.Compact()
+ return
+}
+
+func (o *opHistory) Push(s []rune) {
+ s = runes.Copy(s)
+ elem := o.history.PushBack(&hisItem{Source: s})
+ o.current = elem
+}
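With DisableAutoSaveHistory set, nothing is pushed into this history automatically; the caller decides what to record via SaveHistory. A minimal sketch, assuming a hypothetical multi-line mode where only statements terminated by ';' are saved (the prompt and history path are made up):

    package main

    import (
    	"strings"

    	"github.com/chzyer/readline"
    )

    func main() {
    	rl, err := readline.NewEx(&readline.Config{
    		Prompt:                 "sql> ",
    		HistoryFile:            "/tmp/example.history", // hypothetical path
    		DisableAutoSaveHistory: true,
    	})
    	if err != nil {
    		panic(err)
    	}
    	defer rl.Close()

    	var stmt []string
    	for {
    		line, err := rl.Readline()
    		if err != nil {
    			break
    		}
    		stmt = append(stmt, strings.TrimSpace(line))
    		if !strings.HasSuffix(stmt[len(stmt)-1], ";") {
    			rl.SetPrompt("...> ")
    			continue
    		}
    		// Save the assembled statement as a single history entry.
    		rl.SaveHistory(strings.Join(stmt, " "))
    		stmt = stmt[:0]
    		rl.SetPrompt("sql> ")
    	}
    }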
diff --git a/vendor/github.com/chzyer/readline/operation.go b/vendor/github.com/chzyer/readline/operation.go
new file mode 100644
index 00000000..4c31624f
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/operation.go
@@ -0,0 +1,531 @@
+package readline
+
+import (
+ "errors"
+ "io"
+ "sync"
+)
+
+var (
+ ErrInterrupt = errors.New("Interrupt")
+)
+
+type InterruptError struct {
+ Line []rune
+}
+
+func (*InterruptError) Error() string {
+ return "Interrupted"
+}
+
+type Operation struct {
+ m sync.Mutex
+ cfg *Config
+ t *Terminal
+ buf *RuneBuffer
+ outchan chan []rune
+ errchan chan error
+ w io.Writer
+
+ history *opHistory
+ *opSearch
+ *opCompleter
+ *opPassword
+ *opVim
+}
+
+func (o *Operation) SetBuffer(what string) {
+ o.buf.Set([]rune(what))
+}
+
+type wrapWriter struct {
+ r *Operation
+ t *Terminal
+ target io.Writer
+}
+
+func (w *wrapWriter) Write(b []byte) (int, error) {
+ if !w.t.IsReading() {
+ return w.target.Write(b)
+ }
+
+ var (
+ n int
+ err error
+ )
+ w.r.buf.Refresh(func() {
+ n, err = w.target.Write(b)
+ })
+
+ if w.r.IsSearchMode() {
+ w.r.SearchRefresh(-1)
+ }
+ if w.r.IsInCompleteMode() {
+ w.r.CompleteRefresh()
+ }
+ return n, err
+}
+
+func NewOperation(t *Terminal, cfg *Config) *Operation {
+ width := cfg.FuncGetWidth()
+ op := &Operation{
+ t: t,
+ buf: NewRuneBuffer(t, cfg.Prompt, cfg, width),
+ outchan: make(chan []rune),
+ errchan: make(chan error, 1),
+ }
+ op.w = op.buf.w
+ op.SetConfig(cfg)
+ op.opVim = newVimMode(op)
+ op.opCompleter = newOpCompleter(op.buf.w, op, width)
+ op.opPassword = newOpPassword(op)
+ op.cfg.FuncOnWidthChanged(func() {
+ newWidth := cfg.FuncGetWidth()
+ op.opCompleter.OnWidthChange(newWidth)
+ op.opSearch.OnWidthChange(newWidth)
+ op.buf.OnWidthChange(newWidth)
+ })
+ go op.ioloop()
+ return op
+}
+
+func (o *Operation) SetPrompt(s string) {
+ o.buf.SetPrompt(s)
+}
+
+func (o *Operation) SetMaskRune(r rune) {
+ o.buf.SetMask(r)
+}
+
+func (o *Operation) GetConfig() *Config {
+ o.m.Lock()
+ cfg := *o.cfg
+ o.m.Unlock()
+ return &cfg
+}
+
+func (o *Operation) ioloop() {
+ for {
+ keepInSearchMode := false
+ keepInCompleteMode := false
+ r := o.t.ReadRune()
+ if o.GetConfig().FuncFilterInputRune != nil {
+ var process bool
+ r, process = o.GetConfig().FuncFilterInputRune(r)
+ if !process {
+ o.buf.Refresh(nil) // to refresh the line
+ continue // ignore this rune
+ }
+ }
+
+ if r == 0 { // io.EOF
+ if o.buf.Len() == 0 {
+ o.buf.Clean()
+ select {
+ case o.errchan <- io.EOF:
+ }
+ break
+ } else {
+ // if stdin got io.EOF and there is something left in the buffer,
+ // flush it by sending CharEnter;
+ // we will get io.EOF again on the next loop.
+ r = CharEnter
+ }
+ }
+ isUpdateHistory := true
+
+ if o.IsInCompleteSelectMode() {
+ keepInCompleteMode = o.HandleCompleteSelect(r)
+ if keepInCompleteMode {
+ continue
+ }
+
+ o.buf.Refresh(nil)
+ switch r {
+ case CharEnter, CharCtrlJ:
+ o.history.Update(o.buf.Runes(), false)
+ fallthrough
+ case CharInterrupt:
+ o.t.KickRead()
+ fallthrough
+ case CharBell:
+ continue
+ }
+ }
+
+ if o.IsEnableVimMode() {
+ r = o.HandleVim(r, o.t.ReadRune)
+ if r == 0 {
+ continue
+ }
+ }
+
+ switch r {
+ case CharBell:
+ if o.IsSearchMode() {
+ o.ExitSearchMode(true)
+ o.buf.Refresh(nil)
+ }
+ if o.IsInCompleteMode() {
+ o.ExitCompleteMode(true)
+ o.buf.Refresh(nil)
+ }
+ case CharTab:
+ if o.GetConfig().AutoComplete == nil {
+ o.t.Bell()
+ break
+ }
+ if o.OnComplete() {
+ keepInCompleteMode = true
+ } else {
+ o.t.Bell()
+ break
+ }
+
+ case CharBckSearch:
+ if !o.SearchMode(S_DIR_BCK) {
+ o.t.Bell()
+ break
+ }
+ keepInSearchMode = true
+ case CharCtrlU:
+ o.buf.KillFront()
+ case CharFwdSearch:
+ if !o.SearchMode(S_DIR_FWD) {
+ o.t.Bell()
+ break
+ }
+ keepInSearchMode = true
+ case CharKill:
+ o.buf.Kill()
+ keepInCompleteMode = true
+ case MetaForward:
+ o.buf.MoveToNextWord()
+ case CharTranspose:
+ o.buf.Transpose()
+ case MetaBackward:
+ o.buf.MoveToPrevWord()
+ case MetaDelete:
+ o.buf.DeleteWord()
+ case CharLineStart:
+ o.buf.MoveToLineStart()
+ case CharLineEnd:
+ o.buf.MoveToLineEnd()
+ case CharBackspace, CharCtrlH:
+ if o.IsSearchMode() {
+ o.SearchBackspace()
+ keepInSearchMode = true
+ break
+ }
+
+ if o.buf.Len() == 0 {
+ o.t.Bell()
+ break
+ }
+ o.buf.Backspace()
+ if o.IsInCompleteMode() {
+ o.OnComplete()
+ }
+ case CharCtrlZ:
+ o.buf.Clean()
+ o.t.SleepToResume()
+ o.Refresh()
+ case CharCtrlL:
+ ClearScreen(o.w)
+ o.Refresh()
+ case MetaBackspace, CharCtrlW:
+ o.buf.BackEscapeWord()
+ case CharCtrlY:
+ o.buf.Yank()
+ case CharEnter, CharCtrlJ:
+ if o.IsSearchMode() {
+ o.ExitSearchMode(false)
+ }
+ o.buf.MoveToLineEnd()
+ var data []rune
+ if !o.GetConfig().UniqueEditLine {
+ o.buf.WriteRune('\n')
+ data = o.buf.Reset()
+ data = data[:len(data)-1] // trim \n
+ } else {
+ o.buf.Clean()
+ data = o.buf.Reset()
+ }
+ o.outchan <- data
+ if !o.GetConfig().DisableAutoSaveHistory {
+ // ignore IO error
+ _ = o.history.New(data)
+ } else {
+ isUpdateHistory = false
+ }
+ case CharBackward:
+ o.buf.MoveBackward()
+ case CharForward:
+ o.buf.MoveForward()
+ case CharPrev:
+ buf := o.history.Prev()
+ if buf != nil {
+ o.buf.Set(buf)
+ } else {
+ o.t.Bell()
+ }
+ case CharNext:
+ buf, ok := o.history.Next()
+ if ok {
+ o.buf.Set(buf)
+ } else {
+ o.t.Bell()
+ }
+ case CharDelete:
+ if o.buf.Len() > 0 || !o.IsNormalMode() {
+ o.t.KickRead()
+ if !o.buf.Delete() {
+ o.t.Bell()
+ }
+ break
+ }
+
+ // treat as EOF
+ if !o.GetConfig().UniqueEditLine {
+ o.buf.WriteString(o.GetConfig().EOFPrompt + "\n")
+ }
+ o.buf.Reset()
+ isUpdateHistory = false
+ o.history.Revert()
+ o.errchan <- io.EOF
+ if o.GetConfig().UniqueEditLine {
+ o.buf.Clean()
+ }
+ case CharInterrupt:
+ if o.IsSearchMode() {
+ o.t.KickRead()
+ o.ExitSearchMode(true)
+ break
+ }
+ if o.IsInCompleteMode() {
+ o.t.KickRead()
+ o.ExitCompleteMode(true)
+ o.buf.Refresh(nil)
+ break
+ }
+ o.buf.MoveToLineEnd()
+ o.buf.Refresh(nil)
+ hint := o.GetConfig().InterruptPrompt + "\n"
+ if !o.GetConfig().UniqueEditLine {
+ o.buf.WriteString(hint)
+ }
+ remain := o.buf.Reset()
+ if !o.GetConfig().UniqueEditLine {
+ remain = remain[:len(remain)-len([]rune(hint))]
+ }
+ isUpdateHistory = false
+ o.history.Revert()
+ o.errchan <- &InterruptError{remain}
+ default:
+ if o.IsSearchMode() {
+ o.SearchChar(r)
+ keepInSearchMode = true
+ break
+ }
+ o.buf.WriteRune(r)
+ if o.IsInCompleteMode() {
+ o.OnComplete()
+ keepInCompleteMode = true
+ }
+ }
+
+ listener := o.GetConfig().Listener
+ if listener != nil {
+ newLine, newPos, ok := listener.OnChange(o.buf.Runes(), o.buf.Pos(), r)
+ if ok {
+ o.buf.SetWithIdx(newPos, newLine)
+ }
+ }
+
+ o.m.Lock()
+ if !keepInSearchMode && o.IsSearchMode() {
+ o.ExitSearchMode(false)
+ o.buf.Refresh(nil)
+ } else if o.IsInCompleteMode() {
+ if !keepInCompleteMode {
+ o.ExitCompleteMode(false)
+ o.Refresh()
+ } else {
+ o.buf.Refresh(nil)
+ o.CompleteRefresh()
+ }
+ }
+ if isUpdateHistory && !o.IsSearchMode() {
+ // updating while in search mode would create empty history entries
+ o.history.Update(o.buf.Runes(), false)
+ }
+ o.m.Unlock()
+ }
+}
+
+func (o *Operation) Stderr() io.Writer {
+ return &wrapWriter{target: o.GetConfig().Stderr, r: o, t: o.t}
+}
+
+func (o *Operation) Stdout() io.Writer {
+ return &wrapWriter{target: o.GetConfig().Stdout, r: o, t: o.t}
+}
+
+func (o *Operation) String() (string, error) {
+ r, err := o.Runes()
+ return string(r), err
+}
+
+func (o *Operation) Runes() ([]rune, error) {
+ o.t.EnterRawMode()
+ defer o.t.ExitRawMode()
+
+ listener := o.GetConfig().Listener
+ if listener != nil {
+ listener.OnChange(nil, 0, 0)
+ }
+
+ o.buf.Refresh(nil) // print prompt
+ o.t.KickRead()
+ select {
+ case r := <-o.outchan:
+ return r, nil
+ case err := <-o.errchan:
+ if e, ok := err.(*InterruptError); ok {
+ return e.Line, ErrInterrupt
+ }
+ return nil, err
+ }
+}
+
+func (o *Operation) PasswordEx(prompt string, l Listener) ([]byte, error) {
+ cfg := o.GenPasswordConfig()
+ cfg.Prompt = prompt
+ cfg.Listener = l
+ return o.PasswordWithConfig(cfg)
+}
+
+func (o *Operation) GenPasswordConfig() *Config {
+ return o.opPassword.PasswordConfig()
+}
+
+func (o *Operation) PasswordWithConfig(cfg *Config) ([]byte, error) {
+ if err := o.opPassword.EnterPasswordMode(cfg); err != nil {
+ return nil, err
+ }
+ defer o.opPassword.ExitPasswordMode()
+ return o.Slice()
+}
+
+func (o *Operation) Password(prompt string) ([]byte, error) {
+ return o.PasswordEx(prompt, nil)
+}
+
+func (o *Operation) SetTitle(t string) {
+ o.w.Write([]byte("\033[2;" + t + "\007"))
+}
+
+func (o *Operation) Slice() ([]byte, error) {
+ r, err := o.Runes()
+ if err != nil {
+ return nil, err
+ }
+ return []byte(string(r)), nil
+}
+
+func (o *Operation) Close() {
+ o.history.Close()
+}
+
+func (o *Operation) SetHistoryPath(path string) {
+ if o.history != nil {
+ o.history.Close()
+ }
+ o.cfg.HistoryFile = path
+ o.history = newOpHistory(o.cfg)
+}
+
+func (o *Operation) IsNormalMode() bool {
+ return !o.IsInCompleteMode() && !o.IsSearchMode()
+}
+
+func (op *Operation) SetConfig(cfg *Config) (*Config, error) {
+ op.m.Lock()
+ defer op.m.Unlock()
+ if op.cfg == cfg {
+ return op.cfg, nil
+ }
+ if err := cfg.Init(); err != nil {
+ return op.cfg, err
+ }
+ old := op.cfg
+ op.cfg = cfg
+ op.SetPrompt(cfg.Prompt)
+ op.SetMaskRune(cfg.MaskRune)
+ op.buf.SetConfig(cfg)
+ width := op.cfg.FuncGetWidth()
+
+ if cfg.opHistory == nil {
+ op.SetHistoryPath(cfg.HistoryFile)
+ cfg.opHistory = op.history
+ cfg.opSearch = newOpSearch(op.buf.w, op.buf, op.history, cfg, width)
+ }
+ op.history = cfg.opHistory
+
+ // SetHistoryPath will close any opHistory which already exists,
+ // so before the next use it must be reopened via Init()
+ op.history.Init()
+
+ if op.cfg.AutoComplete != nil {
+ op.opCompleter = newOpCompleter(op.buf.w, op, width)
+ }
+
+ op.opSearch = cfg.opSearch
+ return old, nil
+}
+
+func (o *Operation) ResetHistory() {
+ o.history.Reset()
+}
+
+// a non-nil err only means the write to the history file failed;
+// everything else completed fine.
+func (o *Operation) SaveHistory(content string) error {
+ return o.history.New([]rune(content))
+}
+
+func (o *Operation) Refresh() {
+ if o.t.IsReading() {
+ o.buf.Refresh(nil)
+ }
+}
+
+func (o *Operation) Clean() {
+ o.buf.Clean()
+}
+
+func FuncListener(f func(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool)) Listener {
+ return &DumpListener{f: f}
+}
+
+type DumpListener struct {
+ f func(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool)
+}
+
+func (d *DumpListener) OnChange(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool) {
+ return d.f(line, pos, key)
+}
+
+type Listener interface {
+ OnChange(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool)
+}
+
+type Painter interface {
+ Paint(line []rune, pos int) []rune
+}
+
+type defaultPainter struct{}
+
+func (p *defaultPainter) Paint(line []rune, _ int) []rune {
+ return line
+}
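The Listener hook above receives every keystroke together with the current line and cursor position and may rewrite both (the ioloop then applies the result via SetWithIdx). A minimal sketch of a listener that lowercases the line as it is typed; the transformation is purely illustrative:

    package main

    import "github.com/chzyer/readline"

    func main() {
    	cfg := &readline.Config{Prompt: "> "}

    	// Rewrite the line to lowercase after every keystroke.
    	cfg.SetListener(func(line []rune, pos int, key rune) ([]rune, int, bool) {
    		lower := make([]rune, len(line))
    		for i, r := range line {
    			if r >= 'A' && r <= 'Z' {
    				r += 'a' - 'A'
    			}
    			lower[i] = r
    		}
    		return lower, pos, true
    	})

    	rl, err := readline.NewEx(cfg)
    	if err != nil {
    		panic(err)
    	}
    	defer rl.Close()
    	rl.Readline()
    }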
diff --git a/vendor/github.com/chzyer/readline/password.go b/vendor/github.com/chzyer/readline/password.go
new file mode 100644
index 00000000..414288c2
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/password.go
@@ -0,0 +1,33 @@
+package readline
+
+type opPassword struct {
+ o *Operation
+ backupCfg *Config
+}
+
+func newOpPassword(o *Operation) *opPassword {
+ return &opPassword{o: o}
+}
+
+func (o *opPassword) ExitPasswordMode() {
+ o.o.SetConfig(o.backupCfg)
+ o.backupCfg = nil
+}
+
+func (o *opPassword) EnterPasswordMode(cfg *Config) (err error) {
+ o.backupCfg, err = o.o.SetConfig(cfg)
+ return
+}
+
+func (o *opPassword) PasswordConfig() *Config {
+ return &Config{
+ EnableMask: true,
+ InterruptPrompt: "\n",
+ EOFPrompt: "\n",
+ HistoryLimit: -1,
+ Painter: &defaultPainter{},
+
+ Stdout: o.o.cfg.Stdout,
+ Stderr: o.o.cfg.Stderr,
+ }
+}
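Password mode simply swaps in the masked, history-free Config above for the duration of a single read. A minimal sketch using the Instance-level wrapper:

    package main

    import (
    	"fmt"

    	"github.com/chzyer/readline"
    )

    func main() {
    	rl, err := readline.New("> ")
    	if err != nil {
    		panic(err)
    	}
    	defer rl.Close()

    	// Input is masked and never written to history.
    	pswd, err := rl.ReadPassword("please enter your password: ")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println("password length:", len(pswd))
    }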
diff --git a/vendor/github.com/chzyer/readline/rawreader_windows.go b/vendor/github.com/chzyer/readline/rawreader_windows.go
new file mode 100644
index 00000000..073ef150
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/rawreader_windows.go
@@ -0,0 +1,125 @@
+// +build windows
+
+package readline
+
+import "unsafe"
+
+const (
+ VK_CANCEL = 0x03
+ VK_BACK = 0x08
+ VK_TAB = 0x09
+ VK_RETURN = 0x0D
+ VK_SHIFT = 0x10
+ VK_CONTROL = 0x11
+ VK_MENU = 0x12
+ VK_ESCAPE = 0x1B
+ VK_LEFT = 0x25
+ VK_UP = 0x26
+ VK_RIGHT = 0x27
+ VK_DOWN = 0x28
+ VK_DELETE = 0x2E
+ VK_LSHIFT = 0xA0
+ VK_RSHIFT = 0xA1
+ VK_LCONTROL = 0xA2
+ VK_RCONTROL = 0xA3
+)
+
+// RawReader translates input records into ANSI escape sequences
+// to provide the same behavior as a unix terminal.
+type RawReader struct {
+ ctrlKey bool
+ altKey bool
+}
+
+func NewRawReader() *RawReader {
+ r := new(RawReader)
+ return r
+}
+
+// only process one action in one read
+func (r *RawReader) Read(buf []byte) (int, error) {
+ ir := new(_INPUT_RECORD)
+ var read int
+ var err error
+next:
+ err = kernel.ReadConsoleInputW(stdin,
+ uintptr(unsafe.Pointer(ir)),
+ 1,
+ uintptr(unsafe.Pointer(&read)),
+ )
+ if err != nil {
+ return 0, err
+ }
+ if ir.EventType != EVENT_KEY {
+ goto next
+ }
+ ker := (*_KEY_EVENT_RECORD)(unsafe.Pointer(&ir.Event[0]))
+ if ker.bKeyDown == 0 { // keyup
+ if r.ctrlKey || r.altKey {
+ switch ker.wVirtualKeyCode {
+ case VK_RCONTROL, VK_LCONTROL:
+ r.ctrlKey = false
+ case VK_MENU: //alt
+ r.altKey = false
+ }
+ }
+ goto next
+ }
+
+ if ker.unicodeChar == 0 {
+ var target rune
+ switch ker.wVirtualKeyCode {
+ case VK_RCONTROL, VK_LCONTROL:
+ r.ctrlKey = true
+ case VK_MENU: //alt
+ r.altKey = true
+ case VK_LEFT:
+ target = CharBackward
+ case VK_RIGHT:
+ target = CharForward
+ case VK_UP:
+ target = CharPrev
+ case VK_DOWN:
+ target = CharNext
+ }
+ if target != 0 {
+ return r.write(buf, target)
+ }
+ goto next
+ }
+ char := rune(ker.unicodeChar)
+ if r.ctrlKey {
+ switch char {
+ case 'A':
+ char = CharLineStart
+ case 'E':
+ char = CharLineEnd
+ case 'R':
+ char = CharBckSearch
+ case 'S':
+ char = CharFwdSearch
+ }
+ } else if r.altKey {
+ switch char {
+ case VK_BACK:
+ char = CharBackspace
+ }
+ return r.writeEsc(buf, char)
+ }
+ return r.write(buf, char)
+}
+
+func (r *RawReader) writeEsc(b []byte, char rune) (int, error) {
+ b[0] = '\033'
+ n := copy(b[1:], []byte(string(char)))
+ return n + 1, nil
+}
+
+func (r *RawReader) write(b []byte, char rune) (int, error) {
+ n := copy(b, []byte(string(char)))
+ return n, nil
+}
+
+func (r *RawReader) Close() error {
+ return nil
+}
diff --git a/vendor/github.com/chzyer/readline/readline.go b/vendor/github.com/chzyer/readline/readline.go
new file mode 100644
index 00000000..0e7aca06
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/readline.go
@@ -0,0 +1,326 @@
+// Readline is a pure Go implementation of a GNU-Readline-like library.
+//
+// example:
+// rl, err := readline.New("> ")
+// if err != nil {
+// panic(err)
+// }
+// defer rl.Close()
+//
+// for {
+// line, err := rl.Readline()
+// if err != nil { // io.EOF
+// break
+// }
+// println(line)
+// }
+//
+package readline
+
+import "io"
+
+type Instance struct {
+ Config *Config
+ Terminal *Terminal
+ Operation *Operation
+}
+
+type Config struct {
+ // Prompt supports ANSI escape sequences, so characters can be colored even on Windows
+ Prompt string
+
+ // readline will persist history to the file specified by HistoryFile
+ HistoryFile string
+ // maximum number of history entries; 500 by default, set it to -1 to disable history
+ HistoryLimit int
+ DisableAutoSaveHistory bool
+ // enable case-insensitive history searching
+ HistorySearchFold bool
+
+ // AutoComplete will be called once the user presses TAB
+ AutoComplete AutoCompleter
+
+ // Any key press will be passed to Listener
+ // NOTE: Listener will be triggered immediately with (nil, 0, 0)
+ Listener Listener
+
+ Painter Painter
+
+ // If VimMode is true, readline will be in vim insert mode by default
+ VimMode bool
+
+ InterruptPrompt string
+ EOFPrompt string
+
+ FuncGetWidth func() int
+
+ Stdin io.ReadCloser
+ StdinWriter io.Writer
+ Stdout io.Writer
+ Stderr io.Writer
+
+ EnableMask bool
+ MaskRune rune
+
+ // erase the editing line after the user submits it;
+ // typically used in IM-style applications.
+ UniqueEditLine bool
+
+ // filter input runes (may be used to disable CtrlZ or to translate some keys to different actions)
+ // -> output = the new (translated) rune and whether to continue processing it
+ FuncFilterInputRune func(rune) (rune, bool)
+
+ // force interactive mode even if stdout is not a tty
+ FuncIsTerminal func() bool
+ FuncMakeRaw func() error
+ FuncExitRaw func() error
+ FuncOnWidthChanged func(func())
+ ForceUseInteractive bool
+
+ // private fields
+ inited bool
+ opHistory *opHistory
+ opSearch *opSearch
+}
+
+func (c *Config) useInteractive() bool {
+ if c.ForceUseInteractive {
+ return true
+ }
+ return c.FuncIsTerminal()
+}
+
+func (c *Config) Init() error {
+ if c.inited {
+ return nil
+ }
+ c.inited = true
+ if c.Stdin == nil {
+ c.Stdin = NewCancelableStdin(Stdin)
+ }
+
+ c.Stdin, c.StdinWriter = NewFillableStdin(c.Stdin)
+
+ if c.Stdout == nil {
+ c.Stdout = Stdout
+ }
+ if c.Stderr == nil {
+ c.Stderr = Stderr
+ }
+ if c.HistoryLimit == 0 {
+ c.HistoryLimit = 500
+ }
+
+ if c.InterruptPrompt == "" {
+ c.InterruptPrompt = "^C"
+ } else if c.InterruptPrompt == "\n" {
+ c.InterruptPrompt = ""
+ }
+ if c.EOFPrompt == "" {
+ c.EOFPrompt = "^D"
+ } else if c.EOFPrompt == "\n" {
+ c.EOFPrompt = ""
+ }
+
+ if c.AutoComplete == nil {
+ c.AutoComplete = &TabCompleter{}
+ }
+ if c.FuncGetWidth == nil {
+ c.FuncGetWidth = GetScreenWidth
+ }
+ if c.FuncIsTerminal == nil {
+ c.FuncIsTerminal = DefaultIsTerminal
+ }
+ rm := new(RawMode)
+ if c.FuncMakeRaw == nil {
+ c.FuncMakeRaw = rm.Enter
+ }
+ if c.FuncExitRaw == nil {
+ c.FuncExitRaw = rm.Exit
+ }
+ if c.FuncOnWidthChanged == nil {
+ c.FuncOnWidthChanged = DefaultOnWidthChanged
+ }
+
+ return nil
+}
+
+func (c Config) Clone() *Config {
+ c.opHistory = nil
+ c.opSearch = nil
+ return &c
+}
+
+func (c *Config) SetListener(f func(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool)) {
+ c.Listener = FuncListener(f)
+}
+
+func (c *Config) SetPainter(p Painter) {
+ c.Painter = p
+}
+
+func NewEx(cfg *Config) (*Instance, error) {
+ t, err := NewTerminal(cfg)
+ if err != nil {
+ return nil, err
+ }
+ rl := t.Readline()
+ if cfg.Painter == nil {
+ cfg.Painter = &defaultPainter{}
+ }
+ return &Instance{
+ Config: cfg,
+ Terminal: t,
+ Operation: rl,
+ }, nil
+}
+
+func New(prompt string) (*Instance, error) {
+ return NewEx(&Config{Prompt: prompt})
+}
+
+func (i *Instance) ResetHistory() {
+ i.Operation.ResetHistory()
+}
+
+func (i *Instance) SetPrompt(s string) {
+ i.Operation.SetPrompt(s)
+}
+
+func (i *Instance) SetMaskRune(r rune) {
+ i.Operation.SetMaskRune(r)
+}
+
+// change history persistence at runtime
+func (i *Instance) SetHistoryPath(p string) {
+ i.Operation.SetHistoryPath(p)
+}
+
+// readline will refresh automatically when writing through Stdout()
+func (i *Instance) Stdout() io.Writer {
+ return i.Operation.Stdout()
+}
+
+// readline will refresh automatically when writing through Stderr()
+func (i *Instance) Stderr() io.Writer {
+ return i.Operation.Stderr()
+}
+
+// switch VimMode at runtime
+func (i *Instance) SetVimMode(on bool) {
+ i.Operation.SetVimMode(on)
+}
+
+func (i *Instance) IsVimMode() bool {
+ return i.Operation.IsEnableVimMode()
+}
+
+func (i *Instance) GenPasswordConfig() *Config {
+ return i.Operation.GenPasswordConfig()
+}
+
+// a config can be generated with `i.GenPasswordConfig()`
+func (i *Instance) ReadPasswordWithConfig(cfg *Config) ([]byte, error) {
+ return i.Operation.PasswordWithConfig(cfg)
+}
+
+func (i *Instance) ReadPasswordEx(prompt string, l Listener) ([]byte, error) {
+ return i.Operation.PasswordEx(prompt, l)
+}
+
+func (i *Instance) ReadPassword(prompt string) ([]byte, error) {
+ return i.Operation.Password(prompt)
+}
+
+type Result struct {
+ Line string
+ Error error
+}
+
+func (l *Result) CanContinue() bool {
+ return len(l.Line) != 0 && l.Error == ErrInterrupt
+}
+
+func (l *Result) CanBreak() bool {
+ return !l.CanContinue() && l.Error != nil
+}
+
+func (i *Instance) Line() *Result {
+ ret, err := i.Readline()
+ return &Result{ret, err}
+}
+
+// err is one of (nil, io.EOF, readline.ErrInterrupt)
+func (i *Instance) Readline() (string, error) {
+ return i.Operation.String()
+}
+
+func (i *Instance) ReadlineWithDefault(what string) (string, error) {
+ i.Operation.SetBuffer(what)
+ return i.Operation.String()
+}
+
+func (i *Instance) SaveHistory(content string) error {
+ return i.Operation.SaveHistory(content)
+}
+
+// same as Readline, but returns the raw bytes
+func (i *Instance) ReadSlice() ([]byte, error) {
+ return i.Operation.Slice()
+}
+
+// make sure Close() is called before the process exits.
+func (i *Instance) Close() error {
+ if err := i.Terminal.Close(); err != nil {
+ return err
+ }
+ i.Config.Stdin.Close()
+ i.Operation.Close()
+ return nil
+}
+func (i *Instance) Clean() {
+ i.Operation.Clean()
+}
+
+func (i *Instance) Write(b []byte) (int, error) {
+ return i.Stdout().Write(b)
+}
+
+// WriteStdin prefills the next Stdin fetch.
+// The next time you call Readline() this value will be written before the user input,
+// e.g.:
+// i := readline.New()
+// i.WriteStdin([]byte("test"))
+// _, _= i.Readline()
+//
+// gives
+//
+// > test[cursor]
+func (i *Instance) WriteStdin(val []byte) (int, error) {
+ return i.Terminal.WriteStdin(val)
+}
+
+func (i *Instance) SetConfig(cfg *Config) *Config {
+ if i.Config == cfg {
+ return cfg
+ }
+ old := i.Config
+ i.Config = cfg
+ i.Operation.SetConfig(cfg)
+ i.Terminal.SetConfig(cfg)
+ return old
+}
+
+func (i *Instance) Refresh() {
+ i.Operation.Refresh()
+}
+
+// HistoryDisable disables saving commands into the history
+func (i *Instance) HistoryDisable() {
+ i.Operation.history.Disable()
+}
+
+// HistoryEnable enables saving commands into the history (enabled by default)
+func (i *Instance) HistoryEnable() {
+ i.Operation.history.Enable()
+}
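Putting the pieces of this file together, here is a sketch of a typical interactive loop that distinguishes Ctrl-C (ErrInterrupt) from Ctrl-D (io.EOF). The history path is a made-up example:

    package main

    import (
    	"fmt"
    	"io"

    	"github.com/chzyer/readline"
    )

    func main() {
    	rl, err := readline.NewEx(&readline.Config{
    		Prompt:          "\033[31m»\033[0m ", // ANSI colors work in the prompt
    		HistoryFile:     "/tmp/readline.tmp", // hypothetical path
    		InterruptPrompt: "^C",
    		EOFPrompt:       "exit",
    	})
    	if err != nil {
    		panic(err)
    	}
    	defer rl.Close()

    	for {
    		line, err := rl.Readline()
    		if err == readline.ErrInterrupt {
    			// Ctrl-C: drop the current line but keep reading unless it was empty.
    			if len(line) == 0 {
    				break
    			}
    			continue
    		} else if err == io.EOF {
    			// Ctrl-D on an empty line.
    			break
    		}
    		fmt.Println("you typed:", line)
    	}
    }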
diff --git a/vendor/github.com/chzyer/readline/remote.go b/vendor/github.com/chzyer/readline/remote.go
new file mode 100644
index 00000000..74dbf569
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/remote.go
@@ -0,0 +1,475 @@
+package readline
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "sync"
+ "sync/atomic"
+)
+
+type MsgType int16
+
+const (
+ T_DATA = MsgType(iota)
+ T_WIDTH
+ T_WIDTH_REPORT
+ T_ISTTY_REPORT
+ T_RAW
+ T_ERAW // exit raw
+ T_EOF
+)
+
+type RemoteSvr struct {
+ eof int32
+ closed int32
+ width int32
+ reciveChan chan struct{}
+ writeChan chan *writeCtx
+ conn net.Conn
+ isTerminal bool
+ funcWidthChan func()
+ stopChan chan struct{}
+
+ dataBufM sync.Mutex
+ dataBuf bytes.Buffer
+}
+
+type writeReply struct {
+ n int
+ err error
+}
+
+type writeCtx struct {
+ msg *Message
+ reply chan *writeReply
+}
+
+func newWriteCtx(msg *Message) *writeCtx {
+ return &writeCtx{
+ msg: msg,
+ reply: make(chan *writeReply),
+ }
+}
+
+func NewRemoteSvr(conn net.Conn) (*RemoteSvr, error) {
+ rs := &RemoteSvr{
+ width: -1,
+ conn: conn,
+ writeChan: make(chan *writeCtx),
+ reciveChan: make(chan struct{}),
+ stopChan: make(chan struct{}),
+ }
+ buf := bufio.NewReader(rs.conn)
+
+ if err := rs.init(buf); err != nil {
+ return nil, err
+ }
+
+ go rs.readLoop(buf)
+ go rs.writeLoop()
+ return rs, nil
+}
+
+func (r *RemoteSvr) init(buf *bufio.Reader) error {
+ m, err := ReadMessage(buf)
+ if err != nil {
+ return err
+ }
+ // receive isTerminal
+ if m.Type != T_ISTTY_REPORT {
+ return fmt.Errorf("unexpected init message")
+ }
+ r.GotIsTerminal(m.Data)
+
+ // receive width
+ m, err = ReadMessage(buf)
+ if err != nil {
+ return err
+ }
+ if m.Type != T_WIDTH_REPORT {
+ return fmt.Errorf("unexpected init message")
+ }
+ r.GotReportWidth(m.Data)
+
+ return nil
+}
+
+func (r *RemoteSvr) HandleConfig(cfg *Config) {
+ cfg.Stderr = r
+ cfg.Stdout = r
+ cfg.Stdin = r
+ cfg.FuncExitRaw = r.ExitRawMode
+ cfg.FuncIsTerminal = r.IsTerminal
+ cfg.FuncMakeRaw = r.EnterRawMode
+ cfg.FuncExitRaw = r.ExitRawMode
+ cfg.FuncGetWidth = r.GetWidth
+ cfg.FuncOnWidthChanged = func(f func()) {
+ r.funcWidthChan = f
+ }
+}
+
+func (r *RemoteSvr) IsTerminal() bool {
+ return r.isTerminal
+}
+
+func (r *RemoteSvr) checkEOF() error {
+ if atomic.LoadInt32(&r.eof) == 1 {
+ return io.EOF
+ }
+ return nil
+}
+
+func (r *RemoteSvr) Read(b []byte) (int, error) {
+ r.dataBufM.Lock()
+ n, err := r.dataBuf.Read(b)
+ r.dataBufM.Unlock()
+ if n == 0 {
+ if err := r.checkEOF(); err != nil {
+ return 0, err
+ }
+ }
+
+ if n == 0 && err == io.EOF {
+ <-r.reciveChan
+ r.dataBufM.Lock()
+ n, err = r.dataBuf.Read(b)
+ r.dataBufM.Unlock()
+ }
+ if n == 0 {
+ if err := r.checkEOF(); err != nil {
+ return 0, err
+ }
+ }
+
+ return n, err
+}
+
+func (r *RemoteSvr) writeMsg(m *Message) error {
+ ctx := newWriteCtx(m)
+ r.writeChan <- ctx
+ reply := <-ctx.reply
+ return reply.err
+}
+
+func (r *RemoteSvr) Write(b []byte) (int, error) {
+ ctx := newWriteCtx(NewMessage(T_DATA, b))
+ r.writeChan <- ctx
+ reply := <-ctx.reply
+ return reply.n, reply.err
+}
+
+func (r *RemoteSvr) EnterRawMode() error {
+ return r.writeMsg(NewMessage(T_RAW, nil))
+}
+
+func (r *RemoteSvr) ExitRawMode() error {
+ return r.writeMsg(NewMessage(T_ERAW, nil))
+}
+
+func (r *RemoteSvr) writeLoop() {
+ defer r.Close()
+
+loop:
+ for {
+ select {
+ case ctx, ok := <-r.writeChan:
+ if !ok {
+ break
+ }
+ n, err := ctx.msg.WriteTo(r.conn)
+ ctx.reply <- &writeReply{n, err}
+ case <-r.stopChan:
+ break loop
+ }
+ }
+}
+
+func (r *RemoteSvr) Close() error {
+ if atomic.CompareAndSwapInt32(&r.closed, 0, 1) {
+ close(r.stopChan)
+ r.conn.Close()
+ }
+ return nil
+}
+
+func (r *RemoteSvr) readLoop(buf *bufio.Reader) {
+ defer r.Close()
+ for {
+ m, err := ReadMessage(buf)
+ if err != nil {
+ break
+ }
+ switch m.Type {
+ case T_EOF:
+ atomic.StoreInt32(&r.eof, 1)
+ select {
+ case r.reciveChan <- struct{}{}:
+ default:
+ }
+ case T_DATA:
+ r.dataBufM.Lock()
+ r.dataBuf.Write(m.Data)
+ r.dataBufM.Unlock()
+ select {
+ case r.reciveChan <- struct{}{}:
+ default:
+ }
+ case T_WIDTH_REPORT:
+ r.GotReportWidth(m.Data)
+ case T_ISTTY_REPORT:
+ r.GotIsTerminal(m.Data)
+ }
+ }
+}
+
+func (r *RemoteSvr) GotIsTerminal(data []byte) {
+ if binary.BigEndian.Uint16(data) == 0 {
+ r.isTerminal = false
+ } else {
+ r.isTerminal = true
+ }
+}
+
+func (r *RemoteSvr) GotReportWidth(data []byte) {
+ atomic.StoreInt32(&r.width, int32(binary.BigEndian.Uint16(data)))
+ if r.funcWidthChan != nil {
+ r.funcWidthChan()
+ }
+}
+
+func (r *RemoteSvr) GetWidth() int {
+ return int(atomic.LoadInt32(&r.width))
+}
+
+// -----------------------------------------------------------------------------
+
+type Message struct {
+ Type MsgType
+ Data []byte
+}
+
+func ReadMessage(r io.Reader) (*Message, error) {
+ m := new(Message)
+ var length int32
+ if err := binary.Read(r, binary.BigEndian, &length); err != nil {
+ return nil, err
+ }
+ if err := binary.Read(r, binary.BigEndian, &m.Type); err != nil {
+ return nil, err
+ }
+ m.Data = make([]byte, int(length)-2)
+ if _, err := io.ReadFull(r, m.Data); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func NewMessage(t MsgType, data []byte) *Message {
+ return &Message{t, data}
+}
+
+func (m *Message) WriteTo(w io.Writer) (int, error) {
+ buf := bytes.NewBuffer(make([]byte, 0, len(m.Data)+2+4))
+ binary.Write(buf, binary.BigEndian, int32(len(m.Data)+2))
+ binary.Write(buf, binary.BigEndian, m.Type)
+ buf.Write(m.Data)
+ n, err := buf.WriteTo(w)
+ return int(n), err
+}
+
+// -----------------------------------------------------------------------------
+
+type RemoteCli struct {
+ conn net.Conn
+ raw RawMode
+ receiveChan chan struct{}
+ inited int32
+ isTerminal *bool
+
+ data bytes.Buffer
+ dataM sync.Mutex
+}
+
+func NewRemoteCli(conn net.Conn) (*RemoteCli, error) {
+ r := &RemoteCli{
+ conn: conn,
+ receiveChan: make(chan struct{}),
+ }
+ return r, nil
+}
+
+func (r *RemoteCli) MarkIsTerminal(is bool) {
+ r.isTerminal = &is
+}
+
+func (r *RemoteCli) init() error {
+ if !atomic.CompareAndSwapInt32(&r.inited, 0, 1) {
+ return nil
+ }
+
+ if err := r.reportIsTerminal(); err != nil {
+ return err
+ }
+
+ if err := r.reportWidth(); err != nil {
+ return err
+ }
+
+ // register sig for width changed
+ DefaultOnWidthChanged(func() {
+ r.reportWidth()
+ })
+ return nil
+}
+
+func (r *RemoteCli) writeMsg(m *Message) error {
+ r.dataM.Lock()
+ _, err := m.WriteTo(r.conn)
+ r.dataM.Unlock()
+ return err
+}
+
+func (r *RemoteCli) Write(b []byte) (int, error) {
+ m := NewMessage(T_DATA, b)
+ r.dataM.Lock()
+ _, err := m.WriteTo(r.conn)
+ r.dataM.Unlock()
+ return len(b), err
+}
+
+func (r *RemoteCli) reportWidth() error {
+ screenWidth := GetScreenWidth()
+ data := make([]byte, 2)
+ binary.BigEndian.PutUint16(data, uint16(screenWidth))
+ msg := NewMessage(T_WIDTH_REPORT, data)
+
+ if err := r.writeMsg(msg); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (r *RemoteCli) reportIsTerminal() error {
+ var isTerminal bool
+ if r.isTerminal != nil {
+ isTerminal = *r.isTerminal
+ } else {
+ isTerminal = DefaultIsTerminal()
+ }
+ data := make([]byte, 2)
+ if isTerminal {
+ binary.BigEndian.PutUint16(data, 1)
+ } else {
+ binary.BigEndian.PutUint16(data, 0)
+ }
+ msg := NewMessage(T_ISTTY_REPORT, data)
+ if err := r.writeMsg(msg); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (r *RemoteCli) readLoop() {
+ buf := bufio.NewReader(r.conn)
+ for {
+ msg, err := ReadMessage(buf)
+ if err != nil {
+ break
+ }
+ switch msg.Type {
+ case T_ERAW:
+ r.raw.Exit()
+ case T_RAW:
+ r.raw.Enter()
+ case T_DATA:
+ os.Stdout.Write(msg.Data)
+ }
+ }
+}
+
+func (r *RemoteCli) ServeBy(source io.Reader) error {
+ if err := r.init(); err != nil {
+ return err
+ }
+
+ go func() {
+ defer r.Close()
+ for {
+ n, _ := io.Copy(r, source)
+ if n == 0 {
+ break
+ }
+ }
+ }()
+ defer r.raw.Exit()
+ r.readLoop()
+ return nil
+}
+
+func (r *RemoteCli) Close() {
+ r.writeMsg(NewMessage(T_EOF, nil))
+}
+
+func (r *RemoteCli) Serve() error {
+ return r.ServeBy(os.Stdin)
+}
+
+func ListenRemote(n, addr string, cfg *Config, h func(*Instance), onListen ...func(net.Listener) error) error {
+ ln, err := net.Listen(n, addr)
+ if err != nil {
+ return err
+ }
+ if len(onListen) > 0 {
+ if err := onListen[0](ln); err != nil {
+ return err
+ }
+ }
+ for {
+ conn, err := ln.Accept()
+ if err != nil {
+ break
+ }
+ go func() {
+ defer conn.Close()
+ rl, err := HandleConn(*cfg, conn)
+ if err != nil {
+ return
+ }
+ h(rl)
+ }()
+ }
+ return nil
+}
+
+func HandleConn(cfg Config, conn net.Conn) (*Instance, error) {
+ r, err := NewRemoteSvr(conn)
+ if err != nil {
+ return nil, err
+ }
+ r.HandleConfig(&cfg)
+
+ rl, err := NewEx(&cfg)
+ if err != nil {
+ return nil, err
+ }
+ return rl, nil
+}
+
+func DialRemote(n, addr string) error {
+ conn, err := net.Dial(n, addr)
+ if err != nil {
+ return err
+ }
+ defer conn.Close()
+
+ cli, err := NewRemoteCli(conn)
+ if err != nil {
+ return err
+ }
+ return cli.Serve()
+}
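ListenRemote and DialRemote wrap the message protocol above so the line editor runs server-side while the client only relays raw terminal I/O. A hedged sketch; the address and echo handler are invented for illustration:

    package main

    import (
    	"fmt"
    	"os"

    	"github.com/chzyer/readline"
    )

    func main() {
    	if len(os.Args) > 1 && os.Args[1] == "client" {
    		// Client: bridge the local terminal to the remote editor.
    		if err := readline.DialRemote("tcp", "127.0.0.1:12344"); err != nil {
    			fmt.Fprintln(os.Stderr, err)
    		}
    		return
    	}

    	// Server: each connection gets its own *readline.Instance.
    	cfg := &readline.Config{Prompt: "remote> "}
    	err := readline.ListenRemote("tcp", "127.0.0.1:12344", cfg, func(rl *readline.Instance) {
    		for {
    			line, err := rl.Readline()
    			if err != nil {
    				return
    			}
    			fmt.Fprintln(rl, "echo:", line) // Instance implements io.Writer
    		}
    	})
    	if err != nil {
    		fmt.Fprintln(os.Stderr, err)
    	}
    }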
diff --git a/vendor/github.com/chzyer/readline/runebuf.go b/vendor/github.com/chzyer/readline/runebuf.go
new file mode 100644
index 00000000..81d2da50
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/runebuf.go
@@ -0,0 +1,629 @@
+package readline
+
+import (
+ "bufio"
+ "bytes"
+ "io"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+type runeBufferBck struct {
+ buf []rune
+ idx int
+}
+
+type RuneBuffer struct {
+ buf []rune
+ idx int
+ prompt []rune
+ w io.Writer
+
+ hadClean bool
+ interactive bool
+ cfg *Config
+
+ width int
+
+ bck *runeBufferBck
+
+ offset string
+
+ lastKill []rune
+
+ sync.Mutex
+}
+
+func (r *RuneBuffer) pushKill(text []rune) {
+ r.lastKill = append([]rune{}, text...)
+}
+
+func (r *RuneBuffer) OnWidthChange(newWidth int) {
+ r.Lock()
+ r.width = newWidth
+ r.Unlock()
+}
+
+func (r *RuneBuffer) Backup() {
+ r.Lock()
+ r.bck = &runeBufferBck{r.buf, r.idx}
+ r.Unlock()
+}
+
+func (r *RuneBuffer) Restore() {
+ r.Refresh(func() {
+ if r.bck == nil {
+ return
+ }
+ r.buf = r.bck.buf
+ r.idx = r.bck.idx
+ })
+}
+
+func NewRuneBuffer(w io.Writer, prompt string, cfg *Config, width int) *RuneBuffer {
+ rb := &RuneBuffer{
+ w: w,
+ interactive: cfg.useInteractive(),
+ cfg: cfg,
+ width: width,
+ }
+ rb.SetPrompt(prompt)
+ return rb
+}
+
+func (r *RuneBuffer) SetConfig(cfg *Config) {
+ r.Lock()
+ r.cfg = cfg
+ r.interactive = cfg.useInteractive()
+ r.Unlock()
+}
+
+func (r *RuneBuffer) SetMask(m rune) {
+ r.Lock()
+ r.cfg.MaskRune = m
+ r.Unlock()
+}
+
+func (r *RuneBuffer) CurrentWidth(x int) int {
+ r.Lock()
+ defer r.Unlock()
+ return runes.WidthAll(r.buf[:x])
+}
+
+func (r *RuneBuffer) PromptLen() int {
+ r.Lock()
+ width := r.promptLen()
+ r.Unlock()
+ return width
+}
+
+func (r *RuneBuffer) promptLen() int {
+ return runes.WidthAll(runes.ColorFilter(r.prompt))
+}
+
+func (r *RuneBuffer) RuneSlice(i int) []rune {
+ r.Lock()
+ defer r.Unlock()
+
+ if i > 0 {
+ rs := make([]rune, i)
+ copy(rs, r.buf[r.idx:r.idx+i])
+ return rs
+ }
+ rs := make([]rune, -i)
+ copy(rs, r.buf[r.idx+i:r.idx])
+ return rs
+}
+
+func (r *RuneBuffer) Runes() []rune {
+ r.Lock()
+ newr := make([]rune, len(r.buf))
+ copy(newr, r.buf)
+ r.Unlock()
+ return newr
+}
+
+func (r *RuneBuffer) Pos() int {
+ r.Lock()
+ defer r.Unlock()
+ return r.idx
+}
+
+func (r *RuneBuffer) Len() int {
+ r.Lock()
+ defer r.Unlock()
+ return len(r.buf)
+}
+
+func (r *RuneBuffer) MoveToLineStart() {
+ r.Refresh(func() {
+ if r.idx == 0 {
+ return
+ }
+ r.idx = 0
+ })
+}
+
+func (r *RuneBuffer) MoveBackward() {
+ r.Refresh(func() {
+ if r.idx == 0 {
+ return
+ }
+ r.idx--
+ })
+}
+
+func (r *RuneBuffer) WriteString(s string) {
+ r.WriteRunes([]rune(s))
+}
+
+func (r *RuneBuffer) WriteRune(s rune) {
+ r.WriteRunes([]rune{s})
+}
+
+func (r *RuneBuffer) WriteRunes(s []rune) {
+ r.Refresh(func() {
+ tail := append(s, r.buf[r.idx:]...)
+ r.buf = append(r.buf[:r.idx], tail...)
+ r.idx += len(s)
+ })
+}
+
+func (r *RuneBuffer) MoveForward() {
+ r.Refresh(func() {
+ if r.idx == len(r.buf) {
+ return
+ }
+ r.idx++
+ })
+}
+
+func (r *RuneBuffer) IsCursorInEnd() bool {
+ r.Lock()
+ defer r.Unlock()
+ return r.idx == len(r.buf)
+}
+
+func (r *RuneBuffer) Replace(ch rune) {
+ r.Refresh(func() {
+ r.buf[r.idx] = ch
+ })
+}
+
+func (r *RuneBuffer) Erase() {
+ r.Refresh(func() {
+ r.idx = 0
+ r.pushKill(r.buf[:])
+ r.buf = r.buf[:0]
+ })
+}
+
+func (r *RuneBuffer) Delete() (success bool) {
+ r.Refresh(func() {
+ if r.idx == len(r.buf) {
+ return
+ }
+ r.pushKill(r.buf[r.idx : r.idx+1])
+ r.buf = append(r.buf[:r.idx], r.buf[r.idx+1:]...)
+ success = true
+ })
+ return
+}
+
+func (r *RuneBuffer) DeleteWord() {
+ if r.idx == len(r.buf) {
+ return
+ }
+ init := r.idx
+ for init < len(r.buf) && IsWordBreak(r.buf[init]) {
+ init++
+ }
+ for i := init + 1; i < len(r.buf); i++ {
+ if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) {
+ r.pushKill(r.buf[r.idx:i-1])
+ r.Refresh(func() {
+ r.buf = append(r.buf[:r.idx], r.buf[i-1:]...)
+ })
+ return
+ }
+ }
+ r.Kill()
+}
+
+func (r *RuneBuffer) MoveToPrevWord() (success bool) {
+ r.Refresh(func() {
+ if r.idx == 0 {
+ return
+ }
+
+ for i := r.idx - 1; i > 0; i-- {
+ if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) {
+ r.idx = i
+ success = true
+ return
+ }
+ }
+ r.idx = 0
+ success = true
+ })
+ return
+}
+
+func (r *RuneBuffer) KillFront() {
+ r.Refresh(func() {
+ if r.idx == 0 {
+ return
+ }
+
+ length := len(r.buf) - r.idx
+ r.pushKill(r.buf[:r.idx])
+ copy(r.buf[:length], r.buf[r.idx:])
+ r.idx = 0
+ r.buf = r.buf[:length]
+ })
+}
+
+func (r *RuneBuffer) Kill() {
+ r.Refresh(func() {
+ r.pushKill(r.buf[r.idx:])
+ r.buf = r.buf[:r.idx]
+ })
+}
+
+func (r *RuneBuffer) Transpose() {
+ r.Refresh(func() {
+ if len(r.buf) == 1 {
+ r.idx++
+ }
+
+ if len(r.buf) < 2 {
+ return
+ }
+
+ if r.idx == 0 {
+ r.idx = 1
+ } else if r.idx >= len(r.buf) {
+ r.idx = len(r.buf) - 1
+ }
+ r.buf[r.idx], r.buf[r.idx-1] = r.buf[r.idx-1], r.buf[r.idx]
+ r.idx++
+ })
+}
+
+func (r *RuneBuffer) MoveToNextWord() {
+ r.Refresh(func() {
+ for i := r.idx + 1; i < len(r.buf); i++ {
+ if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) {
+ r.idx = i
+ return
+ }
+ }
+
+ r.idx = len(r.buf)
+ })
+}
+
+func (r *RuneBuffer) MoveToEndWord() {
+ r.Refresh(func() {
+ // already at the end, so do nothing
+ if r.idx == len(r.buf) {
+ return
+ }
+ // if we are at the end of a word already, go to next
+ if !IsWordBreak(r.buf[r.idx]) && IsWordBreak(r.buf[r.idx+1]) {
+ r.idx++
+ }
+
+ // keep going until at the end of a word
+ for i := r.idx + 1; i < len(r.buf); i++ {
+ if IsWordBreak(r.buf[i]) && !IsWordBreak(r.buf[i-1]) {
+ r.idx = i - 1
+ return
+ }
+ }
+ r.idx = len(r.buf)
+ })
+}
+
+func (r *RuneBuffer) BackEscapeWord() {
+ r.Refresh(func() {
+ if r.idx == 0 {
+ return
+ }
+ for i := r.idx - 1; i > 0; i-- {
+ if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) {
+ r.pushKill(r.buf[i:r.idx])
+ r.buf = append(r.buf[:i], r.buf[r.idx:]...)
+ r.idx = i
+ return
+ }
+ }
+
+ r.buf = r.buf[:0]
+ r.idx = 0
+ })
+}
+
+func (r *RuneBuffer) Yank() {
+ if len(r.lastKill) == 0 {
+ return
+ }
+ r.Refresh(func() {
+ buf := make([]rune, 0, len(r.buf) + len(r.lastKill))
+ buf = append(buf, r.buf[:r.idx]...)
+ buf = append(buf, r.lastKill...)
+ buf = append(buf, r.buf[r.idx:]...)
+ r.buf = buf
+ r.idx += len(r.lastKill)
+ })
+}
+
+func (r *RuneBuffer) Backspace() {
+ r.Refresh(func() {
+ if r.idx == 0 {
+ return
+ }
+
+ r.idx--
+ r.buf = append(r.buf[:r.idx], r.buf[r.idx+1:]...)
+ })
+}
+
+func (r *RuneBuffer) MoveToLineEnd() {
+ r.Refresh(func() {
+ if r.idx == len(r.buf) {
+ return
+ }
+
+ r.idx = len(r.buf)
+ })
+}
+
+func (r *RuneBuffer) LineCount(width int) int {
+ if width == -1 {
+ width = r.width
+ }
+ return LineCount(width,
+ runes.WidthAll(r.buf)+r.PromptLen())
+}
+
+func (r *RuneBuffer) MoveTo(ch rune, prevChar, reverse bool) (success bool) {
+ r.Refresh(func() {
+ if reverse {
+ for i := r.idx - 1; i >= 0; i-- {
+ if r.buf[i] == ch {
+ r.idx = i
+ if prevChar {
+ r.idx++
+ }
+ success = true
+ return
+ }
+ }
+ return
+ }
+ for i := r.idx + 1; i < len(r.buf); i++ {
+ if r.buf[i] == ch {
+ r.idx = i
+ if prevChar {
+ r.idx--
+ }
+ success = true
+ return
+ }
+ }
+ })
+ return
+}
+
+func (r *RuneBuffer) isInLineEdge() bool {
+ if isWindows {
+ return false
+ }
+ sp := r.getSplitByLine(r.buf)
+ return len(sp[len(sp)-1]) == 0
+}
+
+func (r *RuneBuffer) getSplitByLine(rs []rune) []string {
+ return SplitByLine(r.promptLen(), r.width, rs)
+}
+
+func (r *RuneBuffer) IdxLine(width int) int {
+ r.Lock()
+ defer r.Unlock()
+ return r.idxLine(width)
+}
+
+func (r *RuneBuffer) idxLine(width int) int {
+ if width == 0 {
+ return 0
+ }
+ sp := r.getSplitByLine(r.buf[:r.idx])
+ return len(sp) - 1
+}
+
+func (r *RuneBuffer) CursorLineCount() int {
+ return r.LineCount(r.width) - r.IdxLine(r.width)
+}
+
+func (r *RuneBuffer) Refresh(f func()) {
+ r.Lock()
+ defer r.Unlock()
+
+ if !r.interactive {
+ if f != nil {
+ f()
+ }
+ return
+ }
+
+ r.clean()
+ if f != nil {
+ f()
+ }
+ r.print()
+}
+
+func (r *RuneBuffer) SetOffset(offset string) {
+ r.Lock()
+ r.offset = offset
+ r.Unlock()
+}
+
+func (r *RuneBuffer) print() {
+ r.w.Write(r.output())
+ r.hadClean = false
+}
+
+func (r *RuneBuffer) output() []byte {
+ buf := bytes.NewBuffer(nil)
+ buf.WriteString(string(r.prompt))
+ if r.cfg.EnableMask && len(r.buf) > 0 {
+ buf.Write([]byte(strings.Repeat(string(r.cfg.MaskRune), len(r.buf)-1)))
+ if r.buf[len(r.buf)-1] == '\n' {
+ buf.Write([]byte{'\n'})
+ } else {
+ buf.Write([]byte(string(r.cfg.MaskRune)))
+ }
+ if len(r.buf) > r.idx {
+ buf.Write(r.getBackspaceSequence())
+ }
+
+ } else {
+ for _, e := range r.cfg.Painter.Paint(r.buf, r.idx) {
+ if e == '\t' {
+ buf.WriteString(strings.Repeat(" ", TabWidth))
+ } else {
+ buf.WriteRune(e)
+ }
+ }
+ if r.isInLineEdge() {
+ buf.Write([]byte(" \b"))
+ }
+ }
+ // cursor position
+ if len(r.buf) > r.idx {
+ buf.Write(r.getBackspaceSequence())
+ }
+ return buf.Bytes()
+}
+
+func (r *RuneBuffer) getBackspaceSequence() []byte {
+ var sep = map[int]bool{}
+
+ var i int
+ for {
+ if i >= runes.WidthAll(r.buf) {
+ break
+ }
+
+ if i == 0 {
+ i -= r.promptLen()
+ }
+ i += r.width
+
+ sep[i] = true
+ }
+ var buf []byte
+ for i := len(r.buf); i > r.idx; i-- {
+ // move the cursor left by one column
+ buf = append(buf, '\b')
+ if sep[i] {
+ // up one line, go to the start of the line and move cursor right to the end (r.width)
+ buf = append(buf, "\033[A\r"+"\033["+strconv.Itoa(r.width)+"C"...)
+ }
+ }
+
+ return buf
+
+}
+
+func (r *RuneBuffer) Reset() []rune {
+ ret := runes.Copy(r.buf)
+ r.buf = r.buf[:0]
+ r.idx = 0
+ return ret
+}
+
+func (r *RuneBuffer) calWidth(m int) int {
+ if m > 0 {
+ return runes.WidthAll(r.buf[r.idx : r.idx+m])
+ }
+ return runes.WidthAll(r.buf[r.idx+m : r.idx])
+}
+
+func (r *RuneBuffer) SetStyle(start, end int, style string) {
+ if end < start {
+ panic("end < start")
+ }
+
+ // goto start
+ move := start - r.idx
+ if move > 0 {
+ r.w.Write([]byte(string(r.buf[r.idx : r.idx+move])))
+ } else {
+ r.w.Write(bytes.Repeat([]byte("\b"), r.calWidth(move)))
+ }
+ r.w.Write([]byte("\033[" + style + "m"))
+ r.w.Write([]byte(string(r.buf[start:end])))
+ r.w.Write([]byte("\033[0m"))
+ // TODO: move back
+}
+
+func (r *RuneBuffer) SetWithIdx(idx int, buf []rune) {
+ r.Refresh(func() {
+ r.buf = buf
+ r.idx = idx
+ })
+}
+
+func (r *RuneBuffer) Set(buf []rune) {
+ r.SetWithIdx(len(buf), buf)
+}
+
+func (r *RuneBuffer) SetPrompt(prompt string) {
+ r.Lock()
+ r.prompt = []rune(prompt)
+ r.Unlock()
+}
+
+func (r *RuneBuffer) cleanOutput(w io.Writer, idxLine int) {
+ buf := bufio.NewWriter(w)
+
+ if r.width == 0 {
+ buf.WriteString(strings.Repeat("\r\b", len(r.buf)+r.promptLen()))
+ buf.Write([]byte("\033[J"))
+ } else {
+ buf.Write([]byte("\033[J")) // just like ^k :)
+ if idxLine == 0 {
+ buf.WriteString("\033[2K")
+ buf.WriteString("\r")
+ } else {
+ for i := 0; i < idxLine; i++ {
+ io.WriteString(buf, "\033[2K\r\033[A")
+ }
+ io.WriteString(buf, "\033[2K\r")
+ }
+ }
+ buf.Flush()
+ return
+}
+
+func (r *RuneBuffer) Clean() {
+ r.Lock()
+ r.clean()
+ r.Unlock()
+}
+
+func (r *RuneBuffer) clean() {
+ r.cleanWithIdxLine(r.idxLine(r.width))
+}
+
+func (r *RuneBuffer) cleanWithIdxLine(idxLine int) {
+ if r.hadClean || !r.interactive {
+ return
+ }
+ r.hadClean = true
+ r.cleanOutput(r.w, idxLine)
+}
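output() runs the visible line through cfg.Painter before printing, so a caller can change only the rendering without touching the buffer contents. A sketch of a custom Painter that masks digits on screen; the masking rule is illustrative, and keeping the rune count unchanged keeps the cursor math valid:

    package main

    import "github.com/chzyer/readline"

    // digitMaskPainter renders digits as '*' without modifying the underlying
    // buffer; only the on-screen representation changes.
    type digitMaskPainter struct{}

    func (p *digitMaskPainter) Paint(line []rune, pos int) []rune {
    	out := make([]rune, len(line))
    	for i, r := range line {
    		if r >= '0' && r <= '9' {
    			out[i] = '*'
    		} else {
    			out[i] = r
    		}
    	}
    	return out
    }

    func main() {
    	cfg := &readline.Config{Prompt: "> "}
    	cfg.SetPainter(&digitMaskPainter{})

    	rl, err := readline.NewEx(cfg)
    	if err != nil {
    		panic(err)
    	}
    	defer rl.Close()
    	rl.Readline()
    }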
diff --git a/vendor/github.com/chzyer/readline/runes.go b/vendor/github.com/chzyer/readline/runes.go
new file mode 100644
index 00000000..a669bc48
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/runes.go
@@ -0,0 +1,223 @@
+package readline
+
+import (
+ "bytes"
+ "unicode"
+ "unicode/utf8"
+)
+
+var runes = Runes{}
+var TabWidth = 4
+
+type Runes struct{}
+
+func (Runes) EqualRune(a, b rune, fold bool) bool {
+ if a == b {
+ return true
+ }
+ if !fold {
+ return false
+ }
+ if a > b {
+ a, b = b, a
+ }
+ if b < utf8.RuneSelf && 'A' <= a && a <= 'Z' {
+ if b == a+'a'-'A' {
+ return true
+ }
+ }
+ return false
+}
+
+func (r Runes) EqualRuneFold(a, b rune) bool {
+ return r.EqualRune(a, b, true)
+}
+
+func (r Runes) EqualFold(a, b []rune) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i := 0; i < len(a); i++ {
+ if r.EqualRuneFold(a[i], b[i]) {
+ continue
+ }
+ return false
+ }
+
+ return true
+}
+
+func (Runes) Equal(a, b []rune) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i := 0; i < len(a); i++ {
+ if a[i] != b[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func (rs Runes) IndexAllBckEx(r, sub []rune, fold bool) int {
+ for i := len(r) - len(sub); i >= 0; i-- {
+ found := true
+ for j := 0; j < len(sub); j++ {
+ if !rs.EqualRune(r[i+j], sub[j], fold) {
+ found = false
+ break
+ }
+ }
+ if found {
+ return i
+ }
+ }
+ return -1
+}
+
+// Search in runes from end to front
+func (rs Runes) IndexAllBck(r, sub []rune) int {
+ return rs.IndexAllBckEx(r, sub, false)
+}
+
+// Search in runes from front to end
+func (rs Runes) IndexAll(r, sub []rune) int {
+ return rs.IndexAllEx(r, sub, false)
+}
+
+func (rs Runes) IndexAllEx(r, sub []rune, fold bool) int {
+ for i := 0; i < len(r); i++ {
+ found := true
+ if len(r[i:]) < len(sub) {
+ return -1
+ }
+ for j := 0; j < len(sub); j++ {
+ if !rs.EqualRune(r[i+j], sub[j], fold) {
+ found = false
+ break
+ }
+ }
+ if found {
+ return i
+ }
+ }
+ return -1
+}
+
+func (Runes) Index(r rune, rs []rune) int {
+ for i := 0; i < len(rs); i++ {
+ if rs[i] == r {
+ return i
+ }
+ }
+ return -1
+}
+
+func (Runes) ColorFilter(r []rune) []rune {
+ newr := make([]rune, 0, len(r))
+ for pos := 0; pos < len(r); pos++ {
+ if r[pos] == '\033' && r[pos+1] == '[' {
+ idx := runes.Index('m', r[pos+2:])
+ if idx == -1 {
+ continue
+ }
+ pos += idx + 2
+ continue
+ }
+ newr = append(newr, r[pos])
+ }
+ return newr
+}
+
+var zeroWidth = []*unicode.RangeTable{
+ unicode.Mn,
+ unicode.Me,
+ unicode.Cc,
+ unicode.Cf,
+}
+
+var doubleWidth = []*unicode.RangeTable{
+ unicode.Han,
+ unicode.Hangul,
+ unicode.Hiragana,
+ unicode.Katakana,
+}
+
+func (Runes) Width(r rune) int {
+ if r == '\t' {
+ return TabWidth
+ }
+ if unicode.IsOneOf(zeroWidth, r) {
+ return 0
+ }
+ if unicode.IsOneOf(doubleWidth, r) {
+ return 2
+ }
+ return 1
+}
+
+func (Runes) WidthAll(r []rune) (length int) {
+ for i := 0; i < len(r); i++ {
+ length += runes.Width(r[i])
+ }
+ return
+}
+
+func (Runes) Backspace(r []rune) []byte {
+ return bytes.Repeat([]byte{'\b'}, runes.WidthAll(r))
+}
+
+func (Runes) Copy(r []rune) []rune {
+ n := make([]rune, len(r))
+ copy(n, r)
+ return n
+}
+
+func (Runes) HasPrefixFold(r, prefix []rune) bool {
+ if len(r) < len(prefix) {
+ return false
+ }
+ return runes.EqualFold(r[:len(prefix)], prefix)
+}
+
+func (Runes) HasPrefix(r, prefix []rune) bool {
+ if len(r) < len(prefix) {
+ return false
+ }
+ return runes.Equal(r[:len(prefix)], prefix)
+}
+
+func (Runes) Aggregate(candicate [][]rune) (same []rune, size int) {
+ for i := 0; i < len(candicate[0]); i++ {
+ for j := 0; j < len(candicate)-1; j++ {
+ if i >= len(candicate[j]) || i >= len(candicate[j+1]) {
+ goto aggregate
+ }
+ if candicate[j][i] != candicate[j+1][i] {
+ goto aggregate
+ }
+ }
+ size = i + 1
+ }
+aggregate:
+ if size > 0 {
+ same = runes.Copy(candicate[0][:size])
+ for i := 0; i < len(candicate); i++ {
+ n := runes.Copy(candicate[i])
+ copy(n, n[size:])
+ candicate[i] = n[:len(n)-size]
+ }
+ }
+ return
+}
+
+func (Runes) TrimSpaceLeft(in []rune) []rune {
+ firstIndex := len(in)
+ for i, r := range in {
+ if !unicode.IsSpace(r) {
+ firstIndex = i
+ break
+ }
+ }
+ return in[firstIndex:]
+}
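The Runes helper type is exported, so the same width and folding logic readline uses for cursor math is available to callers. A small sketch:

    package main

    import (
    	"fmt"

    	"github.com/chzyer/readline"
    )

    func main() {
    	var rs readline.Runes

    	// Display width: CJK runes count as two columns, tabs as readline.TabWidth.
    	fmt.Println(rs.WidthAll([]rune("go 言語")))

    	// Case-insensitive prefix check, the same helper history search uses.
    	fmt.Println(rs.HasPrefixFold([]rune("HELLO"), []rune("he")))

    	// Strip ANSI color sequences before measuring, as the prompt code does.
    	fmt.Println(string(rs.ColorFilter([]rune("\x1b[31mred\x1b[0m"))))
    }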
diff --git a/vendor/github.com/chzyer/readline/search.go b/vendor/github.com/chzyer/readline/search.go
new file mode 100644
index 00000000..52e8ff09
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/search.go
@@ -0,0 +1,164 @@
+package readline
+
+import (
+ "bytes"
+ "container/list"
+ "fmt"
+ "io"
+)
+
+const (
+ S_STATE_FOUND = iota
+ S_STATE_FAILING
+)
+
+const (
+ S_DIR_BCK = iota
+ S_DIR_FWD
+)
+
+type opSearch struct {
+ inMode bool
+ state int
+ dir int
+ source *list.Element
+ w io.Writer
+ buf *RuneBuffer
+ data []rune
+ history *opHistory
+ cfg *Config
+ markStart int
+ markEnd int
+ width int
+}
+
+func newOpSearch(w io.Writer, buf *RuneBuffer, history *opHistory, cfg *Config, width int) *opSearch {
+ return &opSearch{
+ w: w,
+ buf: buf,
+ cfg: cfg,
+ history: history,
+ width: width,
+ }
+}
+
+func (o *opSearch) OnWidthChange(newWidth int) {
+ o.width = newWidth
+}
+
+func (o *opSearch) IsSearchMode() bool {
+ return o.inMode
+}
+
+func (o *opSearch) SearchBackspace() {
+ if len(o.data) > 0 {
+ o.data = o.data[:len(o.data)-1]
+ o.search(true)
+ }
+}
+
+func (o *opSearch) findHistoryBy(isNewSearch bool) (int, *list.Element) {
+ if o.dir == S_DIR_BCK {
+ return o.history.FindBck(isNewSearch, o.data, o.buf.idx)
+ }
+ return o.history.FindFwd(isNewSearch, o.data, o.buf.idx)
+}
+
+func (o *opSearch) search(isChange bool) bool {
+ if len(o.data) == 0 {
+ o.state = S_STATE_FOUND
+ o.SearchRefresh(-1)
+ return true
+ }
+ idx, elem := o.findHistoryBy(isChange)
+ if elem == nil {
+ o.SearchRefresh(-2)
+ return false
+ }
+ o.history.current = elem
+
+ item := o.history.showItem(o.history.current.Value)
+ start, end := 0, 0
+ if o.dir == S_DIR_BCK {
+ start, end = idx, idx+len(o.data)
+ } else {
+ start, end = idx, idx+len(o.data)
+ idx += len(o.data)
+ }
+ o.buf.SetWithIdx(idx, item)
+ o.markStart, o.markEnd = start, end
+ o.SearchRefresh(idx)
+ return true
+}
+
+func (o *opSearch) SearchChar(r rune) {
+ o.data = append(o.data, r)
+ o.search(true)
+}
+
+func (o *opSearch) SearchMode(dir int) bool {
+ if o.width == 0 {
+ return false
+ }
+ alreadyInMode := o.inMode
+ o.inMode = true
+ o.dir = dir
+ o.source = o.history.current
+ if alreadyInMode {
+ o.search(false)
+ } else {
+ o.SearchRefresh(-1)
+ }
+ return true
+}
+
+func (o *opSearch) ExitSearchMode(revert bool) {
+ if revert {
+ o.history.current = o.source
+ o.buf.Set(o.history.showItem(o.history.current.Value))
+ }
+ o.markStart, o.markEnd = 0, 0
+ o.state = S_STATE_FOUND
+ o.inMode = false
+ o.source = nil
+ o.data = nil
+}
+
+func (o *opSearch) SearchRefresh(x int) {
+ if x == -2 {
+ o.state = S_STATE_FAILING
+ } else if x >= 0 {
+ o.state = S_STATE_FOUND
+ }
+ if x < 0 {
+ x = o.buf.idx
+ }
+ x = o.buf.CurrentWidth(x)
+ x += o.buf.PromptLen()
+ x = x % o.width
+
+ if o.markStart > 0 {
+ o.buf.SetStyle(o.markStart, o.markEnd, "4")
+ }
+
+ lineCnt := o.buf.CursorLineCount()
+ buf := bytes.NewBuffer(nil)
+ buf.Write(bytes.Repeat([]byte("\n"), lineCnt))
+ buf.WriteString("\033[J")
+ if o.state == S_STATE_FAILING {
+ buf.WriteString("failing ")
+ }
+ if o.dir == S_DIR_BCK {
+ buf.WriteString("bck")
+ } else if o.dir == S_DIR_FWD {
+ buf.WriteString("fwd")
+ }
+ buf.WriteString("-i-search: ")
+ buf.WriteString(string(o.data)) // keyword
+ buf.WriteString("\033[4m \033[0m") // _
+ fmt.Fprintf(buf, "\r\033[%dA", lineCnt) // move prev
+ if x > 0 {
+ fmt.Fprintf(buf, "\033[%dC", x) // move forward
+ }
+ o.w.Write(buf.Bytes())
+}
diff --git a/vendor/github.com/chzyer/readline/std.go b/vendor/github.com/chzyer/readline/std.go
new file mode 100644
index 00000000..61d44b75
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/std.go
@@ -0,0 +1,197 @@
+package readline
+
+import (
+ "io"
+ "os"
+ "sync"
+ "sync/atomic"
+)
+
+var (
+ Stdin io.ReadCloser = os.Stdin
+ Stdout io.WriteCloser = os.Stdout
+ Stderr io.WriteCloser = os.Stderr
+)
+
+var (
+ std *Instance
+ stdOnce sync.Once
+)
+
+// the global instance does not save history automatically
+func getInstance() *Instance {
+ stdOnce.Do(func() {
+ std, _ = NewEx(&Config{
+ DisableAutoSaveHistory: true,
+ })
+ })
+ return std
+}
+
+// SetHistoryPath tells readline to load history from the given file path
+// and to persist new entries to disk.
+// Set fp to "" to prevent readline from persisting history to disk,
+// in which case AddHistory will always return a nil error.
+func SetHistoryPath(fp string) {
+ ins := getInstance()
+ cfg := ins.Config.Clone()
+ cfg.HistoryFile = fp
+ ins.SetConfig(cfg)
+}
+
+// SetAutoComplete sets the auto completer on the global instance
+func SetAutoComplete(completer AutoCompleter) {
+ ins := getInstance()
+ cfg := ins.Config.Clone()
+ cfg.AutoComplete = completer
+ ins.SetConfig(cfg)
+}
+
+// AddHistory adds history to the global instance manually.
+// It returns an error only if SetHistoryPath was configured with a non-empty path.
+func AddHistory(content string) error {
+ ins := getInstance()
+ return ins.SaveHistory(content)
+}
+
+func Password(prompt string) ([]byte, error) {
+ ins := getInstance()
+ return ins.ReadPassword(prompt)
+}
+
+// Line reads a line using the global instance's configuration
+func Line(prompt string) (string, error) {
+ ins := getInstance()
+ ins.SetPrompt(prompt)
+ return ins.Readline()
+}
+
+type CancelableStdin struct {
+ r io.Reader
+ mutex sync.Mutex
+ stop chan struct{}
+ closed int32
+ notify chan struct{}
+ data []byte
+ read int
+ err error
+}
+
+func NewCancelableStdin(r io.Reader) *CancelableStdin {
+ c := &CancelableStdin{
+ r: r,
+ notify: make(chan struct{}),
+ stop: make(chan struct{}),
+ }
+ go c.ioloop()
+ return c
+}
+
+func (c *CancelableStdin) ioloop() {
+loop:
+ for {
+ select {
+ case <-c.notify:
+ c.read, c.err = c.r.Read(c.data)
+ select {
+ case c.notify <- struct{}{}:
+ case <-c.stop:
+ break loop
+ }
+ case <-c.stop:
+ break loop
+ }
+ }
+}
+
+func (c *CancelableStdin) Read(b []byte) (n int, err error) {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+ if atomic.LoadInt32(&c.closed) == 1 {
+ return 0, io.EOF
+ }
+
+ c.data = b
+ select {
+ case c.notify <- struct{}{}:
+ case <-c.stop:
+ return 0, io.EOF
+ }
+ select {
+ case <-c.notify:
+ return c.read, c.err
+ case <-c.stop:
+ return 0, io.EOF
+ }
+}
+
+func (c *CancelableStdin) Close() error {
+ if atomic.CompareAndSwapInt32(&c.closed, 0, 1) {
+ close(c.stop)
+ }
+ return nil
+}
+
+// FillableStdin is a stdin reader which can have data queued ahead of it,
+// returning that data before reads fall through to the real stdin
+type FillableStdin struct {
+ sync.Mutex
+ stdin io.Reader
+ stdinBuffer io.ReadCloser
+ buf []byte
+ bufErr error
+}
+
+// NewFillableStdin returns a FillableStdin wrapping stdin, together with the writer used to queue data into it
+func NewFillableStdin(stdin io.Reader) (io.ReadCloser, io.Writer) {
+ r, w := io.Pipe()
+ s := &FillableStdin{
+ stdinBuffer: r,
+ stdin: stdin,
+ }
+ s.ioloop()
+ return s, w
+}
+
+func (s *FillableStdin) ioloop() {
+ go func() {
+ for {
+ bufR := make([]byte, 100)
+ var n int
+ n, s.bufErr = s.stdinBuffer.Read(bufR)
+ if s.bufErr != nil {
+ if s.bufErr == io.ErrClosedPipe {
+ break
+ }
+ }
+ s.Lock()
+ s.buf = append(s.buf, bufR[:n]...)
+ s.Unlock()
+ }
+ }()
+}
+
+// Read reads from the local buffer first and, when it is empty, falls through to the underlying stdin
+func (s *FillableStdin) Read(p []byte) (n int, err error) {
+ s.Lock()
+ i := len(s.buf)
+ if len(p) < i {
+ i = len(p)
+ }
+ if i > 0 {
+ n := copy(p, s.buf)
+ s.buf = s.buf[:0]
+ cerr := s.bufErr
+ s.bufErr = nil
+ s.Unlock()
+ return n, cerr
+ }
+ s.Unlock()
+ n, err = s.stdin.Read(p)
+ return n, err
+}
+
+func (s *FillableStdin) Close() error {
+ s.stdinBuffer.Close()
+ return nil
+}
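
The package-level helpers above (SetHistoryPath, AddHistory, Line, Password) all route through the lazily created global instance. A minimal usage sketch, relying only on the exported API shown in this file (the history path is illustrative):

    package main

    import (
        "fmt"

        "github.com/chzyer/readline"
    )

    func main() {
        // History persistence is opt-in for the global instance.
        readline.SetHistoryPath("/tmp/demo_history")

        line, err := readline.Line("demo> ")
        if err != nil {
            fmt.Println("read error:", err)
            return
        }
        // The global instance does not auto-save history, so record entries manually.
        if err := readline.AddHistory(line); err != nil {
            fmt.Println("history error:", err)
        }
        fmt.Println("you typed:", line)
    }
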
diff --git a/vendor/github.com/chzyer/readline/std_windows.go b/vendor/github.com/chzyer/readline/std_windows.go
new file mode 100644
index 00000000..b10f91bc
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/std_windows.go
@@ -0,0 +1,9 @@
+// +build windows
+
+package readline
+
+func init() {
+ Stdin = NewRawReader()
+ Stdout = NewANSIWriter(Stdout)
+ Stderr = NewANSIWriter(Stderr)
+}
diff --git a/vendor/github.com/chzyer/readline/term.go b/vendor/github.com/chzyer/readline/term.go
new file mode 100644
index 00000000..133993ca
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/term.go
@@ -0,0 +1,123 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd solaris
+
+// Package terminal provides support functions for dealing with terminals, as
+// commonly found on UNIX systems.
+//
+// Putting a terminal into raw mode is the most common requirement:
+//
+// oldState, err := terminal.MakeRaw(0)
+// if err != nil {
+// panic(err)
+// }
+// defer terminal.Restore(0, oldState)
+package readline
+
+import (
+ "io"
+ "syscall"
+)
+
+// State contains the state of a terminal.
+type State struct {
+ termios Termios
+}
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal(fd int) bool {
+ _, err := getTermios(fd)
+ return err == nil
+}
+
+// MakeRaw put the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd int) (*State, error) {
+ var oldState State
+
+ if termios, err := getTermios(fd); err != nil {
+ return nil, err
+ } else {
+ oldState.termios = *termios
+ }
+
+ newState := oldState.termios
+ // This attempts to replicate the behaviour documented for cfmakeraw in
+ // the termios(3) manpage.
+ newState.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON
+ // newState.Oflag &^= syscall.OPOST
+ newState.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN
+ newState.Cflag &^= syscall.CSIZE | syscall.PARENB
+ newState.Cflag |= syscall.CS8
+
+ newState.Cc[syscall.VMIN] = 1
+ newState.Cc[syscall.VTIME] = 0
+
+ return &oldState, setTermios(fd, &newState)
+}
+
+// GetState returns the current state of a terminal which may be useful to
+// restore the terminal after a signal.
+func GetState(fd int) (*State, error) {
+ termios, err := getTermios(fd)
+ if err != nil {
+ return nil, err
+ }
+
+ return &State{termios: *termios}, nil
+}
+
+// restoreTerm restores the terminal connected to the given file descriptor to a
+// previous state.
+func restoreTerm(fd int, state *State) error {
+ return setTermios(fd, &state.termios)
+}
+
+// ReadPassword reads a line of input from a terminal without local echo. This
+// is commonly used for inputting passwords and other sensitive data. The slice
+// returned does not include the \n.
+func ReadPassword(fd int) ([]byte, error) {
+ oldState, err := getTermios(fd)
+ if err != nil {
+ return nil, err
+ }
+
+ newState := oldState
+ newState.Lflag &^= syscall.ECHO
+ newState.Lflag |= syscall.ICANON | syscall.ISIG
+ newState.Iflag |= syscall.ICRNL
+ if err := setTermios(fd, newState); err != nil {
+ return nil, err
+ }
+
+ defer func() {
+ setTermios(fd, oldState)
+ }()
+
+ var buf [16]byte
+ var ret []byte
+ for {
+ n, err := syscall.Read(fd, buf[:])
+ if err != nil {
+ return nil, err
+ }
+ if n == 0 {
+ if len(ret) == 0 {
+ return nil, io.EOF
+ }
+ break
+ }
+ if buf[n-1] == '\n' {
+ n--
+ }
+ ret = append(ret, buf[:n]...)
+ if n < len(buf) {
+ break
+ }
+ }
+
+ return ret, nil
+}
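
The package doc comment above already hints at the raw-mode workflow; here is a minimal, Unix-only sketch using the exported MakeRaw, Restore and IsTerminal helpers (Restore is defined in utils.go later in this patch):

    package main

    import (
        "log"
        "os"

        "github.com/chzyer/readline"
    )

    func main() {
        fd := int(os.Stdin.Fd())
        if !readline.IsTerminal(fd) {
            log.Fatal("stdin is not a terminal")
        }
        oldState, err := readline.MakeRaw(fd)
        if err != nil {
            log.Fatal(err)
        }
        // Always restore the previous termios state before exiting.
        defer readline.Restore(fd, oldState)

        // In raw mode a single keypress is delivered immediately and unechoed.
        buf := make([]byte, 1)
        os.Stdin.Read(buf)
    }
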
diff --git a/vendor/github.com/chzyer/readline/term_bsd.go b/vendor/github.com/chzyer/readline/term_bsd.go
new file mode 100644
index 00000000..68b56ea6
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/term_bsd.go
@@ -0,0 +1,29 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package readline
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func getTermios(fd int) (*Termios, error) {
+ termios := new(Termios)
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), syscall.TIOCGETA, uintptr(unsafe.Pointer(termios)), 0, 0, 0)
+ if err != 0 {
+ return nil, err
+ }
+ return termios, nil
+}
+
+func setTermios(fd int, termios *Termios) error {
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), syscall.TIOCSETA, uintptr(unsafe.Pointer(termios)), 0, 0, 0)
+ if err != 0 {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/chzyer/readline/term_linux.go b/vendor/github.com/chzyer/readline/term_linux.go
new file mode 100644
index 00000000..e3392b4a
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/term_linux.go
@@ -0,0 +1,33 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package readline
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// These constants are declared here, rather than importing
+// them from the syscall package as some syscall packages, even
+// on linux, for example gccgo, do not declare them.
+const ioctlReadTermios = 0x5401 // syscall.TCGETS
+const ioctlWriteTermios = 0x5402 // syscall.TCSETS
+
+func getTermios(fd int) (*Termios, error) {
+ termios := new(Termios)
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(termios)), 0, 0, 0)
+ if err != 0 {
+ return nil, err
+ }
+ return termios, nil
+}
+
+func setTermios(fd int, termios *Termios) error {
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(termios)), 0, 0, 0)
+ if err != 0 {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/chzyer/readline/term_solaris.go b/vendor/github.com/chzyer/readline/term_solaris.go
new file mode 100644
index 00000000..4c27273c
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/term_solaris.go
@@ -0,0 +1,32 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build solaris
+
+package readline
+
+import "golang.org/x/sys/unix"
+
+// GetSize returns the dimensions of the given terminal.
+func GetSize(fd int) (int, int, error) {
+ ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ)
+ if err != nil {
+ return 0, 0, err
+ }
+ return int(ws.Col), int(ws.Row), nil
+}
+
+type Termios unix.Termios
+
+func getTermios(fd int) (*Termios, error) {
+ termios, err := unix.IoctlGetTermios(fd, unix.TCGETS)
+ if err != nil {
+ return nil, err
+ }
+ return (*Termios)(termios), nil
+}
+
+func setTermios(fd int, termios *Termios) error {
+ return unix.IoctlSetTermios(fd, unix.TCSETSF, (*unix.Termios)(termios))
+}
diff --git a/vendor/github.com/chzyer/readline/term_unix.go b/vendor/github.com/chzyer/readline/term_unix.go
new file mode 100644
index 00000000..d3ea2424
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/term_unix.go
@@ -0,0 +1,24 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd
+
+package readline
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+type Termios syscall.Termios
+
+// GetSize returns the dimensions of the given terminal.
+func GetSize(fd int) (int, int, error) {
+ var dimensions [4]uint16
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0)
+ if err != 0 {
+ return 0, 0, err
+ }
+ return int(dimensions[1]), int(dimensions[0]), nil
+}
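
GetSize is what the width helpers in utils_unix.go query against stdout or stderr; a small sketch of calling it directly:

    package main

    import (
        "fmt"
        "os"

        "github.com/chzyer/readline"
    )

    func main() {
        // Returns the dimensions of the terminal attached to the descriptor.
        cols, rows, err := readline.GetSize(int(os.Stdout.Fd()))
        if err != nil {
            fmt.Println("not a terminal:", err)
            return
        }
        fmt.Printf("terminal is %d columns x %d rows\n", cols, rows)
    }
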
diff --git a/vendor/github.com/chzyer/readline/term_windows.go b/vendor/github.com/chzyer/readline/term_windows.go
new file mode 100644
index 00000000..1290e00b
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/term_windows.go
@@ -0,0 +1,171 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+// Package terminal provides support functions for dealing with terminals, as
+// commonly found on UNIX systems.
+//
+// Putting a terminal into raw mode is the most common requirement:
+//
+// oldState, err := terminal.MakeRaw(0)
+// if err != nil {
+// panic(err)
+// }
+// defer terminal.Restore(0, oldState)
+package readline
+
+import (
+ "io"
+ "syscall"
+ "unsafe"
+)
+
+const (
+ enableLineInput = 2
+ enableEchoInput = 4
+ enableProcessedInput = 1
+ enableWindowInput = 8
+ enableMouseInput = 16
+ enableInsertMode = 32
+ enableQuickEditMode = 64
+ enableExtendedFlags = 128
+ enableAutoPosition = 256
+ enableProcessedOutput = 1
+ enableWrapAtEolOutput = 2
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+var (
+ procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+ procSetConsoleMode = kernel32.NewProc("SetConsoleMode")
+ procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
+)
+
+type (
+ coord struct {
+ x short
+ y short
+ }
+ smallRect struct {
+ left short
+ top short
+ right short
+ bottom short
+ }
+ consoleScreenBufferInfo struct {
+ size coord
+ cursorPosition coord
+ attributes word
+ window smallRect
+ maximumWindowSize coord
+ }
+)
+
+type State struct {
+ mode uint32
+}
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal(fd int) bool {
+ var st uint32
+ r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+ return r != 0 && e == 0
+}
+
+// MakeRaw put the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd int) (*State, error) {
+ var st uint32
+ _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+ if e != 0 {
+ return nil, error(e)
+ }
+ raw := st &^ (enableEchoInput | enableProcessedInput | enableLineInput | enableProcessedOutput)
+ _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(raw), 0)
+ if e != 0 {
+ return nil, error(e)
+ }
+ return &State{st}, nil
+}
+
+// GetState returns the current state of a terminal which may be useful to
+// restore the terminal after a signal.
+func GetState(fd int) (*State, error) {
+ var st uint32
+ _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+ if e != 0 {
+ return nil, error(e)
+ }
+ return &State{st}, nil
+}
+
+// restoreTerm restores the terminal connected to the given file descriptor to a
+// previous state.
+func restoreTerm(fd int, state *State) error {
+ _, _, err := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(state.mode), 0)
+ return err
+}
+
+// GetSize returns the dimensions of the given terminal.
+func GetSize(fd int) (width, height int, err error) {
+ var info consoleScreenBufferInfo
+ _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&info)), 0)
+ if e != 0 {
+ return 0, 0, error(e)
+ }
+ return int(info.size.x), int(info.size.y), nil
+}
+
+// ReadPassword reads a line of input from a terminal without local echo. This
+// is commonly used for inputting passwords and other sensitive data. The slice
+// returned does not include the \n.
+func ReadPassword(fd int) ([]byte, error) {
+ var st uint32
+ _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+ if e != 0 {
+ return nil, error(e)
+ }
+ old := st
+
+ st &^= (enableEchoInput)
+ st |= (enableProcessedInput | enableLineInput | enableProcessedOutput)
+ _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0)
+ if e != 0 {
+ return nil, error(e)
+ }
+
+ defer func() {
+ syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(old), 0)
+ }()
+
+ var buf [16]byte
+ var ret []byte
+ for {
+ n, err := syscall.Read(syscall.Handle(fd), buf[:])
+ if err != nil {
+ return nil, err
+ }
+ if n == 0 {
+ if len(ret) == 0 {
+ return nil, io.EOF
+ }
+ break
+ }
+ if buf[n-1] == '\n' {
+ n--
+ }
+ if n > 0 && buf[n-1] == '\r' {
+ n--
+ }
+ ret = append(ret, buf[:n]...)
+ if n < len(buf) {
+ break
+ }
+ }
+
+ return ret, nil
+}
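
Both the Unix and Windows builds export the same ReadPassword(fd) entry point, so callers can stay platform-agnostic. A minimal sketch:

    package main

    import (
        "fmt"
        "os"

        "github.com/chzyer/readline"
    )

    func main() {
        fmt.Print("password: ")
        // Echo is disabled for the duration of the read; the trailing newline is stripped.
        pw, err := readline.ReadPassword(int(os.Stdin.Fd()))
        fmt.Println()
        if err != nil {
            fmt.Println("error:", err)
            return
        }
        fmt.Println("read", len(pw), "bytes")
    }
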
diff --git a/vendor/github.com/chzyer/readline/terminal.go b/vendor/github.com/chzyer/readline/terminal.go
new file mode 100644
index 00000000..1078631c
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/terminal.go
@@ -0,0 +1,238 @@
+package readline
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strings"
+ "sync"
+ "sync/atomic"
+)
+
+type Terminal struct {
+ m sync.Mutex
+ cfg *Config
+ outchan chan rune
+ closed int32
+ stopChan chan struct{}
+ kickChan chan struct{}
+ wg sync.WaitGroup
+ isReading int32
+ sleeping int32
+
+ sizeChan chan string
+}
+
+func NewTerminal(cfg *Config) (*Terminal, error) {
+ if err := cfg.Init(); err != nil {
+ return nil, err
+ }
+ t := &Terminal{
+ cfg: cfg,
+ kickChan: make(chan struct{}, 1),
+ outchan: make(chan rune),
+ stopChan: make(chan struct{}, 1),
+ sizeChan: make(chan string, 1),
+ }
+
+ go t.ioloop()
+ return t, nil
+}
+
+// SleepToResume suspends the current process and returns only once it has been resumed.
+func (t *Terminal) SleepToResume() {
+ if !atomic.CompareAndSwapInt32(&t.sleeping, 0, 1) {
+ return
+ }
+ defer atomic.StoreInt32(&t.sleeping, 0)
+
+ t.ExitRawMode()
+ ch := WaitForResume()
+ SuspendMe()
+ <-ch
+ t.EnterRawMode()
+}
+
+func (t *Terminal) EnterRawMode() (err error) {
+ return t.cfg.FuncMakeRaw()
+}
+
+func (t *Terminal) ExitRawMode() (err error) {
+ return t.cfg.FuncExitRaw()
+}
+
+func (t *Terminal) Write(b []byte) (int, error) {
+ return t.cfg.Stdout.Write(b)
+}
+
+// WriteStdin prefills the next Stdin fetch.
+// The next time ReadLine() is called, this value will be written before the user input.
+func (t *Terminal) WriteStdin(b []byte) (int, error) {
+ return t.cfg.StdinWriter.Write(b)
+}
+
+type termSize struct {
+ left int
+ top int
+}
+
+func (t *Terminal) GetOffset(f func(offset string)) {
+ go func() {
+ f(<-t.sizeChan)
+ }()
+ t.Write([]byte("\033[6n"))
+}
+
+func (t *Terminal) Print(s string) {
+ fmt.Fprintf(t.cfg.Stdout, "%s", s)
+}
+
+func (t *Terminal) PrintRune(r rune) {
+ fmt.Fprintf(t.cfg.Stdout, "%c", r)
+}
+
+func (t *Terminal) Readline() *Operation {
+ return NewOperation(t, t.cfg)
+}
+
+// ReadRune returns rune(0) when EOF is reached
+func (t *Terminal) ReadRune() rune {
+ ch, ok := <-t.outchan
+ if !ok {
+ return rune(0)
+ }
+ return ch
+}
+
+func (t *Terminal) IsReading() bool {
+ return atomic.LoadInt32(&t.isReading) == 1
+}
+
+func (t *Terminal) KickRead() {
+ select {
+ case t.kickChan <- struct{}{}:
+ default:
+ }
+}
+
+func (t *Terminal) ioloop() {
+ t.wg.Add(1)
+ defer func() {
+ t.wg.Done()
+ close(t.outchan)
+ }()
+
+ var (
+ isEscape bool
+ isEscapeEx bool
+ expectNextChar bool
+ )
+
+ buf := bufio.NewReader(t.getStdin())
+ for {
+ if !expectNextChar {
+ atomic.StoreInt32(&t.isReading, 0)
+ select {
+ case <-t.kickChan:
+ atomic.StoreInt32(&t.isReading, 1)
+ case <-t.stopChan:
+ return
+ }
+ }
+ expectNextChar = false
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ if strings.Contains(err.Error(), "interrupted system call") {
+ expectNextChar = true
+ continue
+ }
+ break
+ }
+
+ if isEscape {
+ isEscape = false
+ if r == CharEscapeEx {
+ expectNextChar = true
+ isEscapeEx = true
+ continue
+ }
+ r = escapeKey(r, buf)
+ } else if isEscapeEx {
+ isEscapeEx = false
+ if key := readEscKey(r, buf); key != nil {
+ r = escapeExKey(key)
+ // offset
+ if key.typ == 'R' {
+ if _, _, ok := key.Get2(); ok {
+ select {
+ case t.sizeChan <- key.attr:
+ default:
+ }
+ }
+ expectNextChar = true
+ continue
+ }
+ }
+ if r == 0 {
+ expectNextChar = true
+ continue
+ }
+ }
+
+ expectNextChar = true
+ switch r {
+ case CharEsc:
+ if t.cfg.VimMode {
+ t.outchan <- r
+ break
+ }
+ isEscape = true
+ case CharInterrupt, CharEnter, CharCtrlJ, CharDelete:
+ expectNextChar = false
+ fallthrough
+ default:
+ t.outchan <- r
+ }
+ }
+
+}
+
+func (t *Terminal) Bell() {
+ fmt.Fprintf(t, "%c", CharBell)
+}
+
+func (t *Terminal) Close() error {
+ if atomic.SwapInt32(&t.closed, 1) != 0 {
+ return nil
+ }
+ if closer, ok := t.cfg.Stdin.(io.Closer); ok {
+ closer.Close()
+ }
+ close(t.stopChan)
+ t.wg.Wait()
+ return t.ExitRawMode()
+}
+
+func (t *Terminal) GetConfig() *Config {
+ t.m.Lock()
+ cfg := *t.cfg
+ t.m.Unlock()
+ return &cfg
+}
+
+func (t *Terminal) getStdin() io.Reader {
+ t.m.Lock()
+ r := t.cfg.Stdin
+ t.m.Unlock()
+ return r
+}
+
+func (t *Terminal) SetConfig(c *Config) error {
+ if err := c.Init(); err != nil {
+ return err
+ }
+ t.m.Lock()
+ t.cfg = c
+ t.m.Unlock()
+ return nil
+}
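
Terminal.WriteStdin above is what powers input prefill; in normal use it is reached through the Instance wrapper from readline.go (vendored earlier in this patch), which is assumed here to expose a WriteStdin method of the same shape. A sketch under that assumption:

    package main

    import (
        "fmt"

        "github.com/chzyer/readline"
    )

    func main() {
        rl, err := readline.NewEx(&readline.Config{Prompt: "> "})
        if err != nil {
            panic(err)
        }
        defer rl.Close()

        // Queue text that appears as if the user had already typed it.
        rl.WriteStdin([]byte("echo hello"))

        line, err := rl.Readline()
        if err != nil {
            return
        }
        fmt.Println("got:", line)
    }
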
diff --git a/vendor/github.com/chzyer/readline/utils.go b/vendor/github.com/chzyer/readline/utils.go
new file mode 100644
index 00000000..af4e0052
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/utils.go
@@ -0,0 +1,277 @@
+package readline
+
+import (
+ "bufio"
+ "bytes"
+ "container/list"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+ "unicode"
+)
+
+var (
+ isWindows = false
+)
+
+const (
+ CharLineStart = 1
+ CharBackward = 2
+ CharInterrupt = 3
+ CharDelete = 4
+ CharLineEnd = 5
+ CharForward = 6
+ CharBell = 7
+ CharCtrlH = 8
+ CharTab = 9
+ CharCtrlJ = 10
+ CharKill = 11
+ CharCtrlL = 12
+ CharEnter = 13
+ CharNext = 14
+ CharPrev = 16
+ CharBckSearch = 18
+ CharFwdSearch = 19
+ CharTranspose = 20
+ CharCtrlU = 21
+ CharCtrlW = 23
+ CharCtrlY = 25
+ CharCtrlZ = 26
+ CharEsc = 27
+ CharEscapeEx = 91
+ CharBackspace = 127
+)
+
+const (
+ MetaBackward rune = -iota - 1
+ MetaForward
+ MetaDelete
+ MetaBackspace
+ MetaTranspose
+)
+
+// WaitForResume must be called before the current process gets suspended.
+// It runs a ticker until an unusually long gap between ticks occurs,
+// which indicates that the process has been resumed.
+func WaitForResume() chan struct{} {
+ ch := make(chan struct{})
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ ticker := time.NewTicker(10 * time.Millisecond)
+ t := time.Now()
+ wg.Done()
+ for {
+ now := <-ticker.C
+ if now.Sub(t) > 100*time.Millisecond {
+ break
+ }
+ t = now
+ }
+ ticker.Stop()
+ ch <- struct{}{}
+ }()
+ wg.Wait()
+ return ch
+}
+
+func Restore(fd int, state *State) error {
+ err := restoreTerm(fd, state)
+ if err != nil {
+ // errno 0 means everything is ok :)
+ if err.Error() == "errno 0" {
+ return nil
+ } else {
+ return err
+ }
+ }
+ return nil
+}
+
+func IsPrintable(key rune) bool {
+ isInSurrogateArea := key >= 0xd800 && key <= 0xdbff
+ return key >= 32 && !isInSurrogateArea
+}
+
+// translate Esc[X
+func escapeExKey(key *escapeKeyPair) rune {
+ var r rune
+ switch key.typ {
+ case 'D':
+ r = CharBackward
+ case 'C':
+ r = CharForward
+ case 'A':
+ r = CharPrev
+ case 'B':
+ r = CharNext
+ case 'H':
+ r = CharLineStart
+ case 'F':
+ r = CharLineEnd
+ case '~':
+ if key.attr == "3" {
+ r = CharDelete
+ }
+ default:
+ }
+ return r
+}
+
+type escapeKeyPair struct {
+ attr string
+ typ rune
+}
+
+func (e *escapeKeyPair) Get2() (int, int, bool) {
+ sp := strings.Split(e.attr, ";")
+ if len(sp) < 2 {
+ return -1, -1, false
+ }
+ s1, err := strconv.Atoi(sp[0])
+ if err != nil {
+ return -1, -1, false
+ }
+ s2, err := strconv.Atoi(sp[1])
+ if err != nil {
+ return -1, -1, false
+ }
+ return s1, s2, true
+}
+
+func readEscKey(r rune, reader *bufio.Reader) *escapeKeyPair {
+ p := escapeKeyPair{}
+ buf := bytes.NewBuffer(nil)
+ for {
+ if r == ';' {
+ } else if unicode.IsNumber(r) {
+ } else {
+ p.typ = r
+ break
+ }
+ buf.WriteRune(r)
+ r, _, _ = reader.ReadRune()
+ }
+ p.attr = buf.String()
+ return &p
+}
+
+// translate EscX to Meta+X
+func escapeKey(r rune, reader *bufio.Reader) rune {
+ switch r {
+ case 'b':
+ r = MetaBackward
+ case 'f':
+ r = MetaForward
+ case 'd':
+ r = MetaDelete
+ case CharTranspose:
+ r = MetaTranspose
+ case CharBackspace:
+ r = MetaBackspace
+ case 'O':
+ d, _, _ := reader.ReadRune()
+ switch d {
+ case 'H':
+ r = CharLineStart
+ case 'F':
+ r = CharLineEnd
+ default:
+ reader.UnreadRune()
+ }
+ case CharEsc:
+
+ }
+ return r
+}
+
+func SplitByLine(start, screenWidth int, rs []rune) []string {
+ var ret []string
+ buf := bytes.NewBuffer(nil)
+ currentWidth := start
+ for _, r := range rs {
+ w := runes.Width(r)
+ currentWidth += w
+ buf.WriteRune(r)
+ if currentWidth >= screenWidth {
+ ret = append(ret, buf.String())
+ buf.Reset()
+ currentWidth = 0
+ }
+ }
+ ret = append(ret, buf.String())
+ return ret
+}
+
+// LineCount calculates how many lines are needed to display w characters at the given screen width
+func LineCount(screenWidth, w int) int {
+ r := w / screenWidth
+ if w%screenWidth != 0 {
+ r++
+ }
+ return r
+}
+
+func IsWordBreak(i rune) bool {
+ switch {
+ case i >= 'a' && i <= 'z':
+ case i >= 'A' && i <= 'Z':
+ case i >= '0' && i <= '9':
+ default:
+ return true
+ }
+ return false
+}
+
+func GetInt(s []string, def int) int {
+ if len(s) == 0 {
+ return def
+ }
+ c, err := strconv.Atoi(s[0])
+ if err != nil {
+ return def
+ }
+ return c
+}
+
+type RawMode struct {
+ state *State
+}
+
+func (r *RawMode) Enter() (err error) {
+ r.state, err = MakeRaw(GetStdin())
+ return err
+}
+
+func (r *RawMode) Exit() error {
+ if r.state == nil {
+ return nil
+ }
+ return Restore(GetStdin(), r.state)
+}
+
+// -----------------------------------------------------------------------------
+
+func sleep(n int) {
+ Debug(n)
+ time.Sleep(2000 * time.Millisecond)
+}
+
+// print a linked list to Debug()
+func debugList(l *list.List) {
+ idx := 0
+ for e := l.Front(); e != nil; e = e.Next() {
+ Debug(idx, fmt.Sprintf("%+v", e.Value))
+ idx++
+ }
+}
+
+// Debug appends log output to a separate file (debug.tmp)
+func Debug(o ...interface{}) {
+ f, _ := os.OpenFile("debug.tmp", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
+ fmt.Fprintln(f, o...)
+ f.Close()
+}
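
SplitByLine and LineCount above do the width arithmetic used by the refresh code; a small sketch of calling them directly (the 200-rune input is illustrative):

    package main

    import (
        "fmt"
        "strings"

        "github.com/chzyer/readline"
    )

    func main() {
        width := readline.GetScreenWidth()
        if width <= 0 {
            width = 80 // fall back when not attached to a terminal
        }
        text := []rune(strings.Repeat("x", 200))

        lines := readline.SplitByLine(0, width, text)
        fmt.Println("wrapped into", len(lines), "segments")
        fmt.Println("LineCount says:", readline.LineCount(width, len(text)))
    }
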
diff --git a/vendor/github.com/chzyer/readline/utils_unix.go b/vendor/github.com/chzyer/readline/utils_unix.go
new file mode 100644
index 00000000..f88dac97
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/utils_unix.go
@@ -0,0 +1,83 @@
+// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd solaris
+
+package readline
+
+import (
+ "io"
+ "os"
+ "os/signal"
+ "sync"
+ "syscall"
+)
+
+type winsize struct {
+ Row uint16
+ Col uint16
+ Xpixel uint16
+ Ypixel uint16
+}
+
+// SuspendMe sends a suspend signal to the current process while in raw mode.
+// On OSX the signal must be sent to the parent's pid;
+// on Linux it must be sent to the process itself.
+func SuspendMe() {
+ p, _ := os.FindProcess(os.Getppid())
+ p.Signal(syscall.SIGTSTP)
+ p, _ = os.FindProcess(os.Getpid())
+ p.Signal(syscall.SIGTSTP)
+}
+
+// get width of the terminal
+func getWidth(stdoutFd int) int {
+ cols, _, err := GetSize(stdoutFd)
+ if err != nil {
+ return -1
+ }
+ return cols
+}
+
+func GetScreenWidth() int {
+ w := getWidth(syscall.Stdout)
+ if w < 0 {
+ w = getWidth(syscall.Stderr)
+ }
+ return w
+}
+
+// ClearScreen clears the console screen
+func ClearScreen(w io.Writer) (int, error) {
+ return w.Write([]byte("\033[H"))
+}
+
+func DefaultIsTerminal() bool {
+ return IsTerminal(syscall.Stdin) && (IsTerminal(syscall.Stdout) || IsTerminal(syscall.Stderr))
+}
+
+func GetStdin() int {
+ return syscall.Stdin
+}
+
+// -----------------------------------------------------------------------------
+
+var (
+ widthChange sync.Once
+ widthChangeCallback func()
+)
+
+func DefaultOnWidthChanged(f func()) {
+ widthChangeCallback = f
+ widthChange.Do(func() {
+ ch := make(chan os.Signal, 1)
+ signal.Notify(ch, syscall.SIGWINCH)
+
+ go func() {
+ for {
+ _, ok := <-ch
+ if !ok {
+ break
+ }
+ widthChangeCallback()
+ }
+ }()
+ })
+}
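
DefaultOnWidthChanged registers the SIGWINCH handler exactly once and then invokes the most recently set callback on every resize. A sketch of hooking it up:

    package main

    import (
        "fmt"
        "time"

        "github.com/chzyer/readline"
    )

    func main() {
        readline.DefaultOnWidthChanged(func() {
            fmt.Println("terminal resized; width is now", readline.GetScreenWidth())
        })

        // Keep the process alive long enough to resize the window and observe the callback.
        time.Sleep(30 * time.Second)
    }
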
diff --git a/vendor/github.com/chzyer/readline/utils_windows.go b/vendor/github.com/chzyer/readline/utils_windows.go
new file mode 100644
index 00000000..5bfa55dc
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/utils_windows.go
@@ -0,0 +1,41 @@
+// +build windows
+
+package readline
+
+import (
+ "io"
+ "syscall"
+)
+
+func SuspendMe() {
+}
+
+func GetStdin() int {
+ return int(syscall.Stdin)
+}
+
+func init() {
+ isWindows = true
+}
+
+// get width of the terminal
+func GetScreenWidth() int {
+ info, _ := GetConsoleScreenBufferInfo()
+ if info == nil {
+ return -1
+ }
+ return int(info.dwSize.x)
+}
+
+// ClearScreen clears the console screen
+func ClearScreen(_ io.Writer) error {
+ return SetConsoleCursorPosition(&_COORD{0, 0})
+}
+
+func DefaultIsTerminal() bool {
+ return true
+}
+
+func DefaultOnWidthChanged(func()) {
+
+}
diff --git a/vendor/github.com/chzyer/readline/vim.go b/vendor/github.com/chzyer/readline/vim.go
new file mode 100644
index 00000000..bedf2c1a
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/vim.go
@@ -0,0 +1,176 @@
+package readline
+
+const (
+ VIM_NORMAL = iota
+ VIM_INSERT
+ VIM_VISUAL
+)
+
+type opVim struct {
+ cfg *Config
+ op *Operation
+ vimMode int
+}
+
+func newVimMode(op *Operation) *opVim {
+ ov := &opVim{
+ cfg: op.cfg,
+ op: op,
+ }
+ ov.SetVimMode(ov.cfg.VimMode)
+ return ov
+}
+
+func (o *opVim) SetVimMode(on bool) {
+ if o.cfg.VimMode && !on { // turn off
+ o.ExitVimMode()
+ }
+ o.cfg.VimMode = on
+ o.vimMode = VIM_INSERT
+}
+
+func (o *opVim) ExitVimMode() {
+ o.vimMode = VIM_INSERT
+}
+
+func (o *opVim) IsEnableVimMode() bool {
+ return o.cfg.VimMode
+}
+
+func (o *opVim) handleVimNormalMovement(r rune, readNext func() rune) (t rune, handled bool) {
+ rb := o.op.buf
+ handled = true
+ switch r {
+ case 'h':
+ t = CharBackward
+ case 'j':
+ t = CharNext
+ case 'k':
+ t = CharPrev
+ case 'l':
+ t = CharForward
+ case '0', '^':
+ rb.MoveToLineStart()
+ case '$':
+ rb.MoveToLineEnd()
+ case 'x':
+ rb.Delete()
+ if rb.IsCursorInEnd() {
+ rb.MoveBackward()
+ }
+ case 'r':
+ rb.Replace(readNext())
+ case 'd':
+ next := readNext()
+ switch next {
+ case 'd':
+ rb.Erase()
+ case 'w':
+ rb.DeleteWord()
+ case 'h':
+ rb.Backspace()
+ case 'l':
+ rb.Delete()
+ }
+ case 'p':
+ rb.Yank()
+ case 'b', 'B':
+ rb.MoveToPrevWord()
+ case 'w', 'W':
+ rb.MoveToNextWord()
+ case 'e', 'E':
+ rb.MoveToEndWord()
+ case 'f', 'F', 't', 'T':
+ next := readNext()
+ prevChar := r == 't' || r == 'T'
+ reverse := r == 'F' || r == 'T'
+ switch next {
+ case CharEsc:
+ default:
+ rb.MoveTo(next, prevChar, reverse)
+ }
+ default:
+ return r, false
+ }
+ return t, true
+}
+
+func (o *opVim) handleVimNormalEnterInsert(r rune, readNext func() rune) (t rune, handled bool) {
+ rb := o.op.buf
+ handled = true
+ switch r {
+ case 'i':
+ case 'I':
+ rb.MoveToLineStart()
+ case 'a':
+ rb.MoveForward()
+ case 'A':
+ rb.MoveToLineEnd()
+ case 's':
+ rb.Delete()
+ case 'S':
+ rb.Erase()
+ case 'c':
+ next := readNext()
+ switch next {
+ case 'c':
+ rb.Erase()
+ case 'w':
+ rb.DeleteWord()
+ case 'h':
+ rb.Backspace()
+ case 'l':
+ rb.Delete()
+ }
+ default:
+ return r, false
+ }
+
+ o.EnterVimInsertMode()
+ return
+}
+
+func (o *opVim) HandleVimNormal(r rune, readNext func() rune) (t rune) {
+ switch r {
+ case CharEnter, CharInterrupt:
+ o.ExitVimMode()
+ return r
+ }
+
+ if r, handled := o.handleVimNormalMovement(r, readNext); handled {
+ return r
+ }
+
+ if r, handled := o.handleVimNormalEnterInsert(r, readNext); handled {
+ return r
+ }
+
+ // invalid operation
+ o.op.t.Bell()
+ return 0
+}
+
+func (o *opVim) EnterVimInsertMode() {
+ o.vimMode = VIM_INSERT
+}
+
+func (o *opVim) ExitVimInsertMode() {
+ o.vimMode = VIM_NORMAL
+}
+
+func (o *opVim) HandleVim(r rune, readNext func() rune) rune {
+ if o.vimMode == VIM_NORMAL {
+ return o.HandleVimNormal(r, readNext)
+ }
+ if r == CharEsc {
+ o.ExitVimInsertMode()
+ return 0
+ }
+
+ switch o.vimMode {
+ case VIM_INSERT:
+ return r
+ case VIM_VISUAL:
+ }
+ return r
+}
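
opVim itself is unexported; the switch consumers flip is Config.VimMode, which the Terminal ioloop above consults. A sketch of enabling it (the Prompt field is assumed from the Config type vendored earlier in this patch):

    package main

    import (
        "fmt"

        "github.com/chzyer/readline"
    )

    func main() {
        rl, err := readline.NewEx(&readline.Config{
            Prompt:  "> ",
            VimMode: true, // start in insert mode; Esc drops into normal mode ('h', 'l', 'dw', ...)
        })
        if err != nil {
            panic(err)
        }
        defer rl.Close()

        line, err := rl.Readline()
        if err != nil {
            return
        }
        fmt.Println(line)
    }
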
diff --git a/vendor/github.com/chzyer/readline/windows_api.go b/vendor/github.com/chzyer/readline/windows_api.go
new file mode 100644
index 00000000..63f4f7b7
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/windows_api.go
@@ -0,0 +1,152 @@
+// +build windows
+
+package readline
+
+import (
+ "reflect"
+ "syscall"
+ "unsafe"
+)
+
+var (
+ kernel = NewKernel()
+ stdout = uintptr(syscall.Stdout)
+ stdin = uintptr(syscall.Stdin)
+)
+
+type Kernel struct {
+ SetConsoleCursorPosition,
+ SetConsoleTextAttribute,
+ FillConsoleOutputCharacterW,
+ FillConsoleOutputAttribute,
+ ReadConsoleInputW,
+ GetConsoleScreenBufferInfo,
+ GetConsoleCursorInfo,
+ GetStdHandle CallFunc
+}
+
+type short int16
+type word uint16
+type dword uint32
+type wchar uint16
+
+type _COORD struct {
+ x short
+ y short
+}
+
+func (c *_COORD) ptr() uintptr {
+ return uintptr(*(*int32)(unsafe.Pointer(c)))
+}
+
+const (
+ EVENT_KEY = 0x0001
+ EVENT_MOUSE = 0x0002
+ EVENT_WINDOW_BUFFER_SIZE = 0x0004
+ EVENT_MENU = 0x0008
+ EVENT_FOCUS = 0x0010
+)
+
+type _KEY_EVENT_RECORD struct {
+ bKeyDown int32
+ wRepeatCount word
+ wVirtualKeyCode word
+ wVirtualScanCode word
+ unicodeChar wchar
+ dwControlKeyState dword
+}
+
+// KEY_EVENT_RECORD KeyEvent;
+// MOUSE_EVENT_RECORD MouseEvent;
+// WINDOW_BUFFER_SIZE_RECORD WindowBufferSizeEvent;
+// MENU_EVENT_RECORD MenuEvent;
+// FOCUS_EVENT_RECORD FocusEvent;
+type _INPUT_RECORD struct {
+ EventType word
+ Padding uint16
+ Event [16]byte
+}
+
+type _CONSOLE_SCREEN_BUFFER_INFO struct {
+ dwSize _COORD
+ dwCursorPosition _COORD
+ wAttributes word
+ srWindow _SMALL_RECT
+ dwMaximumWindowSize _COORD
+}
+
+type _SMALL_RECT struct {
+ left short
+ top short
+ right short
+ bottom short
+}
+
+type _CONSOLE_CURSOR_INFO struct {
+ dwSize dword
+ bVisible bool
+}
+
+type CallFunc func(u ...uintptr) error
+
+func NewKernel() *Kernel {
+ k := &Kernel{}
+ kernel32 := syscall.NewLazyDLL("kernel32.dll")
+ v := reflect.ValueOf(k).Elem()
+ t := v.Type()
+ for i := 0; i < t.NumField(); i++ {
+ name := t.Field(i).Name
+ f := kernel32.NewProc(name)
+ v.Field(i).Set(reflect.ValueOf(k.Wrap(f)))
+ }
+ return k
+}
+
+func (k *Kernel) Wrap(p *syscall.LazyProc) CallFunc {
+ return func(args ...uintptr) error {
+ var r0 uintptr
+ var e1 syscall.Errno
+ size := uintptr(len(args))
+ if len(args) <= 3 {
+ buf := make([]uintptr, 3)
+ copy(buf, args)
+ r0, _, e1 = syscall.Syscall(p.Addr(), size,
+ buf[0], buf[1], buf[2])
+ } else {
+ buf := make([]uintptr, 6)
+ copy(buf, args)
+ r0, _, e1 = syscall.Syscall6(p.Addr(), size,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5],
+ )
+ }
+
+ if int(r0) == 0 {
+ if e1 != 0 {
+ return error(e1)
+ } else {
+ return syscall.EINVAL
+ }
+ }
+ return nil
+ }
+
+}
+
+func GetConsoleScreenBufferInfo() (*_CONSOLE_SCREEN_BUFFER_INFO, error) {
+ t := new(_CONSOLE_SCREEN_BUFFER_INFO)
+ err := kernel.GetConsoleScreenBufferInfo(
+ stdout,
+ uintptr(unsafe.Pointer(t)),
+ )
+ return t, err
+}
+
+func GetConsoleCursorInfo() (*_CONSOLE_CURSOR_INFO, error) {
+ t := new(_CONSOLE_CURSOR_INFO)
+ err := kernel.GetConsoleCursorInfo(stdout, uintptr(unsafe.Pointer(t)))
+ return t, err
+}
+
+func SetConsoleCursorPosition(c *_COORD) error {
+ return kernel.SetConsoleCursorPosition(stdout, c.ptr())
+}
diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE
new file mode 100644
index 00000000..bc52e96f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/LICENSE
@@ -0,0 +1,15 @@
+ISC License
+
+Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
new file mode 100644
index 00000000..79299478
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -0,0 +1,145 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine, is not compiled by
+// GopherJS, and "-tags safe" is not added to the go build command line. The
+// "disableunsafe" tag is deprecated and thus should not be used.
+// Go versions prior to 1.4 are disabled because they use a different layout
+// for interfaces which makes the implementation of unsafeReflectValue more complex.
+// +build !js,!appengine,!safe,!disableunsafe,go1.4
+
+package spew
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = false
+
+ // ptrSize is the size of a pointer on the current arch.
+ ptrSize = unsafe.Sizeof((*byte)(nil))
+)
+
+type flag uintptr
+
+var (
+ // flagRO indicates whether the value field of a reflect.Value
+ // is read-only.
+ flagRO flag
+
+ // flagAddr indicates whether the address of the reflect.Value's
+ // value may be taken.
+ flagAddr flag
+)
+
+// flagKindMask holds the bits that make up the kind
+// part of the flags field. In all the supported versions,
+// it is in the lower 5 bits.
+const flagKindMask = flag(0x1f)
+
+// Different versions of Go have used different
+// bit layouts for the flags type. This table
+// records the known combinations.
+var okFlags = []struct {
+ ro, addr flag
+}{{
+ // From Go 1.4 to 1.5
+ ro: 1 << 5,
+ addr: 1 << 7,
+}, {
+ // Up to Go tip.
+ ro: 1<<5 | 1<<6,
+ addr: 1 << 8,
+}}
+
+var flagValOffset = func() uintptr {
+ field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+ if !ok {
+ panic("reflect.Value has no flag field")
+ }
+ return field.Offset
+}()
+
+// flagField returns a pointer to the flag field of a reflect.Value.
+func flagField(v *reflect.Value) *flag {
+ return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
+}
+
+// unsafeReflectValue converts the passed reflect.Value into one that bypasses
+// the typical safety restrictions preventing access to unaddressable and
+// unexported data. It works by digging the raw pointer to the underlying
+// value out of the protected value and generating a new unprotected (unsafe)
+// reflect.Value to it.
+//
+// This allows us to check for implementations of the Stringer and error
+// interfaces to be used for pretty printing ordinarily unaddressable and
+// inaccessible values such as unexported struct fields.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+ if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
+ return v
+ }
+ flagFieldPtr := flagField(&v)
+ *flagFieldPtr &^= flagRO
+ *flagFieldPtr |= flagAddr
+ return v
+}
+
+// Sanity checks against future reflect package changes
+// to the type or semantics of the Value.flag field.
+func init() {
+ field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+ if !ok {
+ panic("reflect.Value has no flag field")
+ }
+ if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
+ panic("reflect.Value flag field has changed kind")
+ }
+ type t0 int
+ var t struct {
+ A t0
+ // t0 will have flagEmbedRO set.
+ t0
+ // a will have flagStickyRO set
+ a t0
+ }
+ vA := reflect.ValueOf(t).FieldByName("A")
+ va := reflect.ValueOf(t).FieldByName("a")
+ vt0 := reflect.ValueOf(t).FieldByName("t0")
+
+ // Infer flagRO from the difference between the flags
+ // for the (otherwise identical) fields in t.
+ flagPublic := *flagField(&vA)
+ flagWithRO := *flagField(&va) | *flagField(&vt0)
+ flagRO = flagPublic ^ flagWithRO
+
+ // Infer flagAddr from the difference between a value
+ // taken from a pointer and not.
+ vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
+ flagNoPtr := *flagField(&vA)
+ flagPtr := *flagField(&vPtrA)
+ flagAddr = flagNoPtr ^ flagPtr
+
+ // Check that the inferred flags tally with one of the known versions.
+ for _, f := range okFlags {
+ if flagRO == f.ro && flagAddr == f.addr {
+ return
+ }
+ }
+ panic("reflect.Value read-only flag has changed semantics")
+}
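
The flag surgery above is what lets spew reach unexported and unaddressable values when the unsafe package is available. A tiny sketch of the observable effect at the public API level:

    package main

    import "github.com/davecgh/go-spew/spew"

    type payload struct {
        Public  string
        private int // normally unreachable through reflect.Value.Interface
    }

    func main() {
        // spew.Dump walks unexported fields as well; the unsafe-backed bypass above is
        // what keeps that working in the general case (e.g. values that must be re-addressed).
        spew.Dump(payload{Public: "visible", private: 42})
    }
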
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
new file mode 100644
index 00000000..205c28d6
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is running on Google App Engine, compiled by GopherJS, or
+// "-tags safe" is added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build js appengine safe disableunsafe !go1.4
+
+package spew
+
+import "reflect"
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = true
+)
+
+// unsafeReflectValue typically converts the passed reflect.Value into one
+// that bypasses the typical safety restrictions preventing access to
+// unaddressable and unexported data. However, doing this relies on access to
+// the unsafe package. This is a stub version which simply returns the passed
+// reflect.Value when the unsafe package is not available.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+ return v
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
new file mode 100644
index 00000000..1be8ce94
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+ "strconv"
+)
+
+// Some constants in the form of bytes to avoid string overhead. This mirrors
+// the technique used in the fmt package.
+var (
+ panicBytes = []byte("(PANIC=")
+ plusBytes = []byte("+")
+ iBytes = []byte("i")
+ trueBytes = []byte("true")
+ falseBytes = []byte("false")
+ interfaceBytes = []byte("(interface {})")
+ commaNewlineBytes = []byte(",\n")
+ newlineBytes = []byte("\n")
+ openBraceBytes = []byte("{")
+ openBraceNewlineBytes = []byte("{\n")
+ closeBraceBytes = []byte("}")
+ asteriskBytes = []byte("*")
+ colonBytes = []byte(":")
+ colonSpaceBytes = []byte(": ")
+ openParenBytes = []byte("(")
+ closeParenBytes = []byte(")")
+ spaceBytes = []byte(" ")
+ pointerChainBytes = []byte("->")
+ nilAngleBytes = []byte("<nil>")
+ maxNewlineBytes = []byte("<max depth reached>\n")
+ maxShortBytes = []byte("<max>")
+ circularBytes = []byte("<already shown>")
+ circularShortBytes = []byte("<shown>")
+ invalidAngleBytes = []byte("<invalid>")
+ openBracketBytes = []byte("[")
+ closeBracketBytes = []byte("]")
+ percentBytes = []byte("%")
+ precisionBytes = []byte(".")
+ openAngleBytes = []byte("<")
+ closeAngleBytes = []byte(">")
+ openMapBytes = []byte("map[")
+ closeMapBytes = []byte("]")
+ lenEqualsBytes = []byte("len=")
+ capEqualsBytes = []byte("cap=")
+)
+
+// hexDigits is used to map a decimal value to a hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
+func catchPanic(w io.Writer, v reflect.Value) {
+ if err := recover(); err != nil {
+ w.Write(panicBytes)
+ fmt.Fprintf(w, "%v", err)
+ w.Write(closeParenBytes)
+ }
+}
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputs the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
+ // We need an interface to check if the type implements the error or
+ // Stringer interface. However, the reflect package won't give us an
+ // interface on certain things like unexported struct fields in order
+ // to enforce visibility rules. We use unsafe, when it's available,
+ // to bypass these restrictions since this package does not mutate the
+ // values.
+ if !v.CanInterface() {
+ if UnsafeDisabled {
+ return false
+ }
+
+ v = unsafeReflectValue(v)
+ }
+
+ // Choose whether or not to do error and Stringer interface lookups against
+ // the base type or a pointer to the base type depending on settings.
+ // Technically calling one of these methods with a pointer receiver can
+ // mutate the value, however, types which choose to satisfy an error or
+ // Stringer interface with a pointer receiver should not be mutating their
+ // state inside these interface methods.
+ if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
+ v = unsafeReflectValue(v)
+ }
+ if v.CanAddr() {
+ v = v.Addr()
+ }
+
+ // Is it an error or Stringer?
+ switch iface := v.Interface().(type) {
+ case error:
+ defer catchPanic(w, v)
+ if cs.ContinueOnMethod {
+ w.Write(openParenBytes)
+ w.Write([]byte(iface.Error()))
+ w.Write(closeParenBytes)
+ w.Write(spaceBytes)
+ return false
+ }
+
+ w.Write([]byte(iface.Error()))
+ return true
+
+ case fmt.Stringer:
+ defer catchPanic(w, v)
+ if cs.ContinueOnMethod {
+ w.Write(openParenBytes)
+ w.Write([]byte(iface.String()))
+ w.Write(closeParenBytes)
+ w.Write(spaceBytes)
+ return false
+ }
+ w.Write([]byte(iface.String()))
+ return true
+ }
+ return false
+}
+
+// printBool outputs a boolean value as true or false to Writer w.
+func printBool(w io.Writer, val bool) {
+ if val {
+ w.Write(trueBytes)
+ } else {
+ w.Write(falseBytes)
+ }
+}
+
+// printInt outputs a signed integer value to Writer w.
+func printInt(w io.Writer, val int64, base int) {
+ w.Write([]byte(strconv.FormatInt(val, base)))
+}
+
+// printUint outputs an unsigned integer value to Writer w.
+func printUint(w io.Writer, val uint64, base int) {
+ w.Write([]byte(strconv.FormatUint(val, base)))
+}
+
+// printFloat outputs a floating point value using the specified precision,
+// which is expected to be 32 or 64bit, to Writer w.
+func printFloat(w io.Writer, val float64, precision int) {
+ w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
+}
+
+// printComplex outputs a complex value using the specified float precision
+// for the real and imaginary parts to Writer w.
+func printComplex(w io.Writer, c complex128, floatPrecision int) {
+ r := real(c)
+ w.Write(openParenBytes)
+ w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
+ i := imag(c)
+ if i >= 0 {
+ w.Write(plusBytes)
+ }
+ w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
+ w.Write(iBytes)
+ w.Write(closeParenBytes)
+}
+
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
+// prefix to Writer w.
+func printHexPtr(w io.Writer, p uintptr) {
+ // Null pointer.
+ num := uint64(p)
+ if num == 0 {
+ w.Write(nilAngleBytes)
+ return
+ }
+
+ // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
+ buf := make([]byte, 18)
+
+ // It's simpler to construct the hex string right to left.
+ base := uint64(16)
+ i := len(buf) - 1
+ for num >= base {
+ buf[i] = hexDigits[num%base]
+ num /= base
+ i--
+ }
+ buf[i] = hexDigits[num]
+
+ // Add '0x' prefix.
+ i--
+ buf[i] = 'x'
+ i--
+ buf[i] = '0'
+
+ // Strip unused leading bytes.
+ buf = buf[i:]
+ w.Write(buf)
+}
+
+// valuesSorter implements sort.Interface to allow a slice of reflect.Value
+// elements to be sorted.
+type valuesSorter struct {
+ values []reflect.Value
+ strings []string // either nil or the same length as values
+ cs *ConfigState
+}
+
+// newValuesSorter initializes a valuesSorter instance, which holds a set of
+// surrogate keys on which the data should be sorted. It uses flags in
+// ConfigState to decide if and how to populate those surrogate keys.
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
+ vs := &valuesSorter{values: values, cs: cs}
+ if canSortSimply(vs.values[0].Kind()) {
+ return vs
+ }
+ if !cs.DisableMethods {
+ vs.strings = make([]string, len(values))
+ for i := range vs.values {
+ b := bytes.Buffer{}
+ if !handleMethods(cs, &b, vs.values[i]) {
+ vs.strings = nil
+ break
+ }
+ vs.strings[i] = b.String()
+ }
+ }
+ if vs.strings == nil && cs.SpewKeys {
+ vs.strings = make([]string, len(values))
+ for i := range vs.values {
+ vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
+ }
+ }
+ return vs
+}
+
+// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
+// directly, or whether it should be considered for sorting by surrogate keys
+// (if the ConfigState allows it).
+func canSortSimply(kind reflect.Kind) bool {
+ // This switch parallels valueSortLess, except for the default case.
+ switch kind {
+ case reflect.Bool:
+ return true
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return true
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return true
+ case reflect.Float32, reflect.Float64:
+ return true
+ case reflect.String:
+ return true
+ case reflect.Uintptr:
+ return true
+ case reflect.Array:
+ return true
+ }
+ return false
+}
+
+// Len returns the number of values in the slice. It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Len() int {
+ return len(s.values)
+}
+
+// Swap swaps the values at the passed indices. It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Swap(i, j int) {
+ s.values[i], s.values[j] = s.values[j], s.values[i]
+ if s.strings != nil {
+ s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
+ }
+}
+
+// valueSortLess returns whether the first value should sort before the second
+// value. It is used by valuesSorter.Less as part of the sort.Interface
+// implementation.
+func valueSortLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return a.Int() < b.Int()
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return a.Uint() < b.Uint()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.String:
+ return a.String() < b.String()
+ case reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Array:
+ // Compare the contents of both arrays.
+ l := a.Len()
+ for i := 0; i < l; i++ {
+ av := a.Index(i)
+ bv := b.Index(i)
+ if av.Interface() == bv.Interface() {
+ continue
+ }
+ return valueSortLess(av, bv)
+ }
+ }
+ return a.String() < b.String()
+}
+
+// Less returns whether the value at index i should sort before the
+// value at index j. It is part of the sort.Interface implementation.
+func (s *valuesSorter) Less(i, j int) bool {
+ if s.strings == nil {
+ return valueSortLess(s.values[i], s.values[j])
+ }
+ return s.strings[i] < s.strings[j]
+}
+
+// sortValues is a sort function that handles both native types and any type that
+// can be converted to error or Stringer. Other inputs are sorted according to
+// their Value.String() value to ensure display stability.
+func sortValues(values []reflect.Value, cs *ConfigState) {
+ if len(values) == 0 {
+ return
+ }
+ sort.Sort(newValuesSorter(values, cs))
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go
new file mode 100644
index 00000000..2e3d22f3
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/config.go
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+// ConfigState houses the configuration options used by spew to format and
+// display values. There is a global instance, Config, that is used to control
+// all top-level Formatter and Dump functionality. Each ConfigState instance
+// provides methods equivalent to the top-level functions.
+//
+// The zero value for ConfigState provides no indentation. You would typically
+// want to set it to a space or a tab.
+//
+// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
+// with default settings. See the documentation of NewDefaultConfig for default
+// values.
+type ConfigState struct {
+ // Indent specifies the string to use for each indentation level. The
+ // global config instance that all top-level functions use set this to a
+ // single space by default. If you would like more indentation, you might
+ // set this to a tab with "\t" or perhaps two spaces with " ".
+ Indent string
+
+ // MaxDepth controls the maximum number of levels to descend into nested
+ // data structures. The default, 0, means there is no limit.
+ //
+ // NOTE: Circular data structures are properly detected, so it is not
+ // necessary to set this value unless you specifically want to limit deeply
+ // nested data structures.
+ MaxDepth int
+
+ // DisableMethods specifies whether or not error and Stringer interfaces are
+ // invoked for types that implement them.
+ DisableMethods bool
+
+ // DisablePointerMethods specifies whether or not to check for and invoke
+ // error and Stringer interfaces on types which only accept a pointer
+ // receiver when the current type is not a pointer.
+ //
+ // NOTE: This might be an unsafe action since calling one of these methods
+ // with a pointer receiver could technically mutate the value, however,
+ // in practice, types which choose to satisfy an error or Stringer
+ // interface with a pointer receiver should not be mutating their state
+ // inside these interface methods. As a result, this option relies on
+ // access to the unsafe package, so it will not have any effect when
+ // running in environments without access to the unsafe package such as
+ // Google App Engine or with the "safe" build tag specified.
+ DisablePointerMethods bool
+
+ // DisablePointerAddresses specifies whether to disable the printing of
+ // pointer addresses. This is useful when diffing data structures in tests.
+ DisablePointerAddresses bool
+
+ // DisableCapacities specifies whether to disable the printing of capacities
+ // for arrays, slices, maps and channels. This is useful when diffing
+ // data structures in tests.
+ DisableCapacities bool
+
+ // ContinueOnMethod specifies whether or not recursion should continue once
+ // a custom error or Stringer interface is invoked. The default, false,
+ // means it will print the results of invoking the custom error or Stringer
+ // interface and return immediately instead of continuing to recurse into
+ // the internals of the data type.
+ //
+ // NOTE: This flag does not have any effect if method invocation is disabled
+ // via the DisableMethods or DisablePointerMethods options.
+ ContinueOnMethod bool
+
+ // SortKeys specifies map keys should be sorted before being printed. Use
+ // this to have a more deterministic, diffable output. Note that only
+ // native types (bool, int, uint, floats, uintptr and string) and types
+ // that support the error or Stringer interfaces (if methods are
+ // enabled) are supported, with other types sorted according to the
+ // reflect.Value.String() output which guarantees display stability.
+ SortKeys bool
+
+ // SpewKeys specifies that, as a last resort attempt, map keys should
+ // be spewed to strings and sorted by those strings. This is only
+ // considered if SortKeys is true.
+ SpewKeys bool
+}
+
+// Config is the active configuration of the top-level functions.
+// The configuration can be changed by modifying the contents of spew.Config.
+var Config = ConfigState{Indent: " "}
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the formatted string as a value that satisfies error. See NewFormatter
+// for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, c.convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, c.convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, c.convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, c.convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(c.convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, c.convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(c.convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprint(a ...interface{}) string {
+ return fmt.Sprint(c.convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, c.convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(c.convertArgs(a)...)
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Printf, c.Println, or c.Fprintf.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+ fdump(c, w, a...)
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by modifying the public members
+of c. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func (c *ConfigState) Dump(a ...interface{}) {
+ fdump(c, os.Stdout, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func (c *ConfigState) Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(c, &buf, a...)
+ return buf.String()
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a spew Formatter interface using
+// the ConfigState associated with c.
+func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
+ formatters = make([]interface{}, len(args))
+ for index, arg := range args {
+ formatters[index] = newFormatter(c, arg)
+ }
+ return formatters
+}
+
+// NewDefaultConfig returns a ConfigState with the following default settings.
+//
+// Indent: " "
+// MaxDepth: 0
+// DisableMethods: false
+// DisablePointerMethods: false
+// ContinueOnMethod: false
+// SortKeys: false
+func NewDefaultConfig() *ConfigState {
+ return &ConfigState{Indent: " "}
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go
new file mode 100644
index 00000000..aacaac6f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/doc.go
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Package spew implements a deep pretty printer for Go data structures to aid in
+debugging.
+
+A quick overview of the additional features spew provides over the built-in
+printing facilities for Go data types is as follows:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output (only when using
+ Dump style)
+
+There are two different approaches spew allows for dumping Go data structures:
+
+ * Dump style which prints with newlines, customizable indentation,
+ and additional debug information such as types and all pointer addresses
+ used to indirect to the final value
+ * A custom Formatter interface that integrates cleanly with the standard fmt
+ package and replaces %v, %+v, %#v, and %#+v to provide inline printing
+ similar to the default %v while providing the additional functionality
+ outlined above and passing unsupported format verbs such as %x and %q
+ along to fmt
+
+Quick Start
+
+This section demonstrates how to quickly get started with spew. See the
+sections below for further details on formatting and configuration options.
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+ spew.Dump(myVar1, myVar2, ...)
+ spew.Fdump(someWriter, myVar1, myVar2, ...)
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc with
+%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
+%#+v (adds types and pointer addresses):
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type. For
+convenience, all of the top-level functions use a global state available
+via the spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions. This allows multiple configurations to
+be used concurrently. See the ConfigState documentation for more details.
+
+The following configuration options are available:
+ * Indent
+ String to use for each indentation level for Dump functions.
+ It is a single space by default. A popular alternative is "\t".
+
+ * MaxDepth
+ Maximum number of levels to descend into nested data structures.
+ There is no limit by default.
+
+ * DisableMethods
+ Disables invocation of error and Stringer interface methods.
+ Method invocation is enabled by default.
+
+ * DisablePointerMethods
+ Disables invocation of error and Stringer interface methods on types
+ which only accept pointer receivers from non-pointer variables.
+ Pointer method invocation is enabled by default.
+
+	* DisablePointerAddresses
+		Disables the printing of pointer addresses. This is useful when
+		diffing data structures in tests.
+
+	* DisableCapacities
+		Disables the printing of capacities for arrays, slices, maps and
+		channels. This is useful when diffing data structures in tests.
+
+ * ContinueOnMethod
+ Enables recursion into types after invoking error and Stringer interface
+ methods. Recursion after method invocation is disabled by default.
+
+ * SortKeys
+ Specifies map keys should be sorted before being printed. Use
+ this to have a more deterministic, diffable output. Note that
+ only native types (bool, int, uint, floats, uintptr and string)
+ and types which implement error or Stringer interfaces are
+		supported, with other types sorted according to the
+ reflect.Value.String() output which guarantees display
+ stability. Natural map order is used by default.
+
+ * SpewKeys
+ Specifies that, as a last resort attempt, map keys should be
+ spewed to strings and sorted by those strings. This is only
+ considered if SortKeys is true.
+
+Dump Usage
+
+Simply call spew.Dump with a list of variables you want to dump:
+
+ spew.Dump(myVar1, myVar2, ...)
+
+You may also call spew.Fdump if you would prefer to output to an arbitrary
+io.Writer. For example, to dump to standard error:
+
+ spew.Fdump(os.Stderr, myVar1, myVar2, ...)
+
+A third option is to call spew.Sdump to get the formatted output as a string:
+
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Sample Dump Output
+
+See the Dump example for details on the setup of the types and variables being
+shown here.
+
+ (main.Foo) {
+ unexportedField: (*main.Bar)(0xf84002e210)({
+ flag: (main.Flag) flagTwo,
+ data: (uintptr)
+ }),
+ ExportedField: (map[interface {}]interface {}) (len=1) {
+ (string) (len=3) "one": (bool) true
+ }
+ }
+
+Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
+command as shown.
+ ([]uint8) (len=32 cap=32) {
+ 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
+ 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
+ 00000020 31 32 |12|
+ }
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
+functions have syntax you are most likely already familiar with:
+
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Println(myVar, myVar2)
+ spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
+
+Sample Formatter Output
+
+Double pointer to a uint8:
+ %v: <**>5
+ %+v: <**>(0xf8400420d0->0xf8400420c8)5
+ %#v: (**uint8)5
+ %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+ %v: <*>{1 <*>}
+ %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)}
+ %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)}
+ %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)}
+
+See the Printf example for details on the setup of variables being shown
+here.
+
+Errors
+
+Since it is possible for custom Stringer/error interfaces to panic, spew
+detects them and handles them internally by printing the panic information
+inline with the output. Since spew is intended to provide deep pretty printing
+capabilities on structures, it intentionally does not return any errors.
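+
+For illustration only, a minimal sketch (flaky is a hypothetical type whose
+String method panics; the panic is recovered and reported inline in the dump):
+
+	type flaky struct{}
+
+	func (flaky) String() string { panic("boom") }
+
+	spew.Dump(flaky{})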
+*/
+package spew
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
new file mode 100644
index 00000000..f78d89fc
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ // uint8Type is a reflect.Type representing a uint8. It is used to
+ // convert cgo types to uint8 slices for hexdumping.
+ uint8Type = reflect.TypeOf(uint8(0))
+
+ // cCharRE is a regular expression that matches a cgo char.
+ // It is used to detect character arrays to hexdump them.
+ cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
+
+ // cUnsignedCharRE is a regular expression that matches a cgo unsigned
+ // char. It is used to detect unsigned character arrays to hexdump
+ // them.
+ cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
+
+ // cUint8tCharRE is a regular expression that matches a cgo uint8_t.
+ // It is used to detect uint8_t arrays to hexdump them.
+ cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
+)
+
+// dumpState contains information about the state of a dump operation.
+type dumpState struct {
+ w io.Writer
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ ignoreNextIndent bool
+ cs *ConfigState
+}
+
+// indent performs indentation according to the depth level and cs.Indent
+// option.
+func (d *dumpState) indent() {
+ if d.ignoreNextIndent {
+ d.ignoreNextIndent = false
+ return
+ }
+ d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ v = v.Elem()
+ }
+ return v
+}
+
+// dumpPtr handles formatting of pointers by indirecting them as necessary.
+func (d *dumpState) dumpPtr(v reflect.Value) {
+ // Remove pointers at or below the current depth from map used to detect
+ // circular refs.
+ for k, depth := range d.pointers {
+ if depth >= d.depth {
+ delete(d.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to show later.
+ pointerChain := make([]uintptr, 0)
+
+ // Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := d.pointers[addr]; ok && pd < d.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ d.pointers[addr] = d.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type information.
+ d.w.Write(openParenBytes)
+ d.w.Write(bytes.Repeat(asteriskBytes, indirects))
+ d.w.Write([]byte(ve.Type().String()))
+ d.w.Write(closeParenBytes)
+
+ // Display pointer information.
+ if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
+ d.w.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ d.w.Write(pointerChainBytes)
+ }
+ printHexPtr(d.w, addr)
+ }
+ d.w.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ d.w.Write(openParenBytes)
+ switch {
+ case nilFound:
+ d.w.Write(nilAngleBytes)
+
+ case cycleFound:
+ d.w.Write(circularBytes)
+
+ default:
+ d.ignoreNextType = true
+ d.dump(ve)
+ }
+ d.w.Write(closeParenBytes)
+}
+
+// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
+// reflection) arrays and slices are dumped in hexdump -C fashion.
+func (d *dumpState) dumpSlice(v reflect.Value) {
+ // Determine whether this type should be hex dumped or not. Also,
+ // for types which should be hexdumped, try to use the underlying data
+ // first, then fall back to trying to convert them to a uint8 slice.
+ var buf []uint8
+ doConvert := false
+ doHexDump := false
+ numEntries := v.Len()
+ if numEntries > 0 {
+ vt := v.Index(0).Type()
+ vts := vt.String()
+ switch {
+ // C types that need to be converted.
+ case cCharRE.MatchString(vts):
+ fallthrough
+ case cUnsignedCharRE.MatchString(vts):
+ fallthrough
+ case cUint8tCharRE.MatchString(vts):
+ doConvert = true
+
+ // Try to use existing uint8 slices and fall back to converting
+ // and copying if that fails.
+ case vt.Kind() == reflect.Uint8:
+ // We need an addressable interface to convert the type
+ // to a byte slice. However, the reflect package won't
+ // give us an interface on certain things like
+ // unexported struct fields in order to enforce
+ // visibility rules. We use unsafe, when available, to
+ // bypass these restrictions since this package does not
+ // mutate the values.
+ vs := v
+ if !vs.CanInterface() || !vs.CanAddr() {
+ vs = unsafeReflectValue(vs)
+ }
+ if !UnsafeDisabled {
+ vs = vs.Slice(0, numEntries)
+
+ // Use the existing uint8 slice if it can be
+ // type asserted.
+ iface := vs.Interface()
+ if slice, ok := iface.([]uint8); ok {
+ buf = slice
+ doHexDump = true
+ break
+ }
+ }
+
+ // The underlying data needs to be converted if it can't
+ // be type asserted to a uint8 slice.
+ doConvert = true
+ }
+
+ // Copy and convert the underlying type if needed.
+ if doConvert && vt.ConvertibleTo(uint8Type) {
+ // Convert and copy each element into a uint8 byte
+ // slice.
+ buf = make([]uint8, numEntries)
+ for i := 0; i < numEntries; i++ {
+ vv := v.Index(i)
+ buf[i] = uint8(vv.Convert(uint8Type).Uint())
+ }
+ doHexDump = true
+ }
+ }
+
+ // Hexdump the entire slice as needed.
+ if doHexDump {
+ indent := strings.Repeat(d.cs.Indent, d.depth)
+ str := indent + hex.Dump(buf)
+ str = strings.Replace(str, "\n", "\n"+indent, -1)
+ str = strings.TrimRight(str, d.cs.Indent)
+ d.w.Write([]byte(str))
+ return
+ }
+
+ // Recursively call dump for each item.
+ for i := 0; i < numEntries; i++ {
+ d.dump(d.unpackValue(v.Index(i)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+}
+
+// dump is the main workhorse for dumping a value. It uses the passed reflect
+// value to figure out what kind of object we are dealing with and formats it
+// appropriately. It is a recursive function, however circular data structures
+// are detected and handled properly.
+func (d *dumpState) dump(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ d.w.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ d.indent()
+ d.dumpPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !d.ignoreNextType {
+ d.indent()
+ d.w.Write(openParenBytes)
+ d.w.Write([]byte(v.Type().String()))
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+ d.ignoreNextType = false
+
+ // Display length and capacity if the built-in len and cap functions
+ // work with the value's kind and the len/cap itself is non-zero.
+ valueLen, valueCap := 0, 0
+ switch v.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Chan:
+ valueLen, valueCap = v.Len(), v.Cap()
+ case reflect.Map, reflect.String:
+ valueLen = v.Len()
+ }
+ if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
+ d.w.Write(openParenBytes)
+ if valueLen != 0 {
+ d.w.Write(lenEqualsBytes)
+ printInt(d.w, int64(valueLen), 10)
+ }
+ if !d.cs.DisableCapacities && valueCap != 0 {
+ if valueLen != 0 {
+ d.w.Write(spaceBytes)
+ }
+ d.w.Write(capEqualsBytes)
+ printInt(d.w, int64(valueCap), 10)
+ }
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+
+ // Call Stringer/error interfaces if they exist and the handle methods flag
+	// is enabled.
+ if !d.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(d.cs, d.w, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(d.w, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(d.w, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(d.w, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(d.w, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(d.w, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(d.w, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(d.w, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ d.dumpSlice(v)
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.String:
+ d.w.Write([]byte(strconv.Quote(v.String())))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+ // nil maps should be indicated as different than empty maps
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ numEntries := v.Len()
+ keys := v.MapKeys()
+ if d.cs.SortKeys {
+ sortValues(keys, d.cs)
+ }
+ for i, key := range keys {
+ d.dump(d.unpackValue(key))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.MapIndex(key)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Struct:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ vt := v.Type()
+ numFields := v.NumField()
+ for i := 0; i < numFields; i++ {
+ d.indent()
+ vtf := vt.Field(i)
+ d.w.Write([]byte(vtf.Name))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.Field(i)))
+ if i < (numFields - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(d.w, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(d.w, v.Pointer())
+
+ // There were not any other types at the time this code was written, but
+ // fall back to letting the default fmt package handle it in case any new
+ // types are added.
+ default:
+ if v.CanInterface() {
+ fmt.Fprintf(d.w, "%v", v.Interface())
+ } else {
+ fmt.Fprintf(d.w, "%v", v.String())
+ }
+ }
+}
+
+// fdump is a helper function to consolidate the logic from the various public
+// methods which take varying writers and config states.
+func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
+ for _, arg := range a {
+ if arg == nil {
+ w.Write(interfaceBytes)
+ w.Write(spaceBytes)
+ w.Write(nilAngleBytes)
+ w.Write(newlineBytes)
+ continue
+ }
+
+ d := dumpState{w: w, cs: cs}
+ d.pointers = make(map[uintptr]int)
+ d.dump(reflect.ValueOf(arg))
+ d.w.Write(newlineBytes)
+ }
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func Fdump(w io.Writer, a ...interface{}) {
+ fdump(&Config, w, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(&Config, &buf, a...)
+ return buf.String()
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by an exported package global,
+spew.Config. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func Dump(a ...interface{}) {
+ fdump(&Config, os.Stdout, a...)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go
new file mode 100644
index 00000000..b04edb7d
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/format.go
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// supportedFlags is a list of all the character flags supported by the fmt package.
+const supportedFlags = "0-+# "
+
+// formatState implements the fmt.Formatter interface and contains information
+// about the state of a formatting operation. The NewFormatter function can
+// be used to get a new Formatter which can be used directly as arguments
+// in standard fmt package printing calls.
+type formatState struct {
+ value interface{}
+ fs fmt.State
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ cs *ConfigState
+}
+
+// buildDefaultFormat recreates the original format string without precision
+// and width information to pass in to fmt.Sprintf in the case of an
+// unrecognized type. Unless new types are added to the language, this
+// function won't ever be called.
+func (f *formatState) buildDefaultFormat() (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ buf.WriteRune('v')
+
+ format = buf.String()
+ return format
+}
+
+// constructOrigFormat recreates the original format string including precision
+// and width information to pass along to the standard fmt package. This allows
+// automatic deferral of all format strings this package doesn't support.
+func (f *formatState) constructOrigFormat(verb rune) (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ if width, ok := f.fs.Width(); ok {
+ buf.WriteString(strconv.Itoa(width))
+ }
+
+ if precision, ok := f.fs.Precision(); ok {
+ buf.Write(precisionBytes)
+ buf.WriteString(strconv.Itoa(precision))
+ }
+
+ buf.WriteRune(verb)
+
+ format = buf.String()
+ return format
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible and
+// ensures that types for values which have been unpacked from an interface
+// are displayed when the show types flag is also set.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface {
+ f.ignoreNextType = false
+ if !v.IsNil() {
+ v = v.Elem()
+ }
+ }
+ return v
+}
+
+// formatPtr handles formatting of pointers by indirecting them as necessary.
+func (f *formatState) formatPtr(v reflect.Value) {
+ // Display nil if top level pointer is nil.
+ showTypes := f.fs.Flag('#')
+ if v.IsNil() && (!showTypes || f.ignoreNextType) {
+ f.fs.Write(nilAngleBytes)
+ return
+ }
+
+ // Remove pointers at or below the current depth from map used to detect
+ // circular refs.
+ for k, depth := range f.pointers {
+ if depth >= f.depth {
+ delete(f.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to possibly show later.
+ pointerChain := make([]uintptr, 0)
+
+	// Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := f.pointers[addr]; ok && pd < f.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ f.pointers[addr] = f.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type or indirection level depending on flags.
+ if showTypes && !f.ignoreNextType {
+ f.fs.Write(openParenBytes)
+ f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
+ f.fs.Write([]byte(ve.Type().String()))
+ f.fs.Write(closeParenBytes)
+ } else {
+ if nilFound || cycleFound {
+ indirects += strings.Count(ve.Type().String(), "*")
+ }
+ f.fs.Write(openAngleBytes)
+ f.fs.Write([]byte(strings.Repeat("*", indirects)))
+ f.fs.Write(closeAngleBytes)
+ }
+
+ // Display pointer information depending on flags.
+ if f.fs.Flag('+') && (len(pointerChain) > 0) {
+ f.fs.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ f.fs.Write(pointerChainBytes)
+ }
+ printHexPtr(f.fs, addr)
+ }
+ f.fs.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ switch {
+ case nilFound:
+ f.fs.Write(nilAngleBytes)
+
+ case cycleFound:
+ f.fs.Write(circularShortBytes)
+
+ default:
+ f.ignoreNextType = true
+ f.format(ve)
+ }
+}
+
+// format is the main workhorse for providing the Formatter interface. It
+// uses the passed reflect value to figure out what kind of object we are
+// dealing with and formats it appropriately. It is a recursive function,
+// however circular data structures are detected and handled properly.
+func (f *formatState) format(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ f.fs.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ f.formatPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !f.ignoreNextType && f.fs.Flag('#') {
+ f.fs.Write(openParenBytes)
+ f.fs.Write([]byte(v.Type().String()))
+ f.fs.Write(closeParenBytes)
+ }
+ f.ignoreNextType = false
+
+ // Call Stringer/error interfaces if they exist and the handle methods
+ // flag is enabled.
+ if !f.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(f.cs, f.fs, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(f.fs, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(f.fs, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(f.fs, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(f.fs, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(f.fs, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(f.fs, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(f.fs, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ f.fs.Write(openBracketBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ numEntries := v.Len()
+ for i := 0; i < numEntries; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.Index(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBracketBytes)
+
+ case reflect.String:
+ f.fs.Write([]byte(v.String()))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+ // nil maps should be indicated as different than empty maps
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+
+ f.fs.Write(openMapBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ keys := v.MapKeys()
+ if f.cs.SortKeys {
+ sortValues(keys, f.cs)
+ }
+ for i, key := range keys {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(key))
+ f.fs.Write(colonBytes)
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.MapIndex(key)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeMapBytes)
+
+ case reflect.Struct:
+ numFields := v.NumField()
+ f.fs.Write(openBraceBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ vt := v.Type()
+ for i := 0; i < numFields; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ vtf := vt.Field(i)
+ if f.fs.Flag('+') || f.fs.Flag('#') {
+ f.fs.Write([]byte(vtf.Name))
+ f.fs.Write(colonBytes)
+ }
+ f.format(f.unpackValue(v.Field(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(f.fs, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(f.fs, v.Pointer())
+
+ // There were not any other types at the time this code was written, but
+ // fall back to letting the default fmt package handle it if any get added.
+ default:
+ format := f.buildDefaultFormat()
+ if v.CanInterface() {
+ fmt.Fprintf(f.fs, format, v.Interface())
+ } else {
+ fmt.Fprintf(f.fs, format, v.String())
+ }
+ }
+}
+
+// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
+// details.
+func (f *formatState) Format(fs fmt.State, verb rune) {
+ f.fs = fs
+
+ // Use standard formatting for verbs that are not v.
+ if verb != 'v' {
+ format := f.constructOrigFormat(verb)
+ fmt.Fprintf(fs, format, f.value)
+ return
+ }
+
+ if f.value == nil {
+ if fs.Flag('#') {
+ fs.Write(interfaceBytes)
+ }
+ fs.Write(nilAngleBytes)
+ return
+ }
+
+ f.format(reflect.ValueOf(f.value))
+}
+
+// newFormatter is a helper function to consolidate the logic from the various
+// public methods which take varying config states.
+func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
+ fs := &formatState{value: v, cs: cs}
+ fs.pointers = make(map[uintptr]int)
+ return fs
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+Printf, Println, or Fprintf.
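+
+For illustration only, a minimal sketch (myVar is a placeholder):
+
+	fmt.Printf("%#+v\n", spew.NewFormatter(myVar))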
+*/
+func NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(&Config, v)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go
new file mode 100644
index 00000000..32c0e338
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/spew.go
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "fmt"
+ "io"
+)
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the formatted string as a value that satisfies error. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
+func Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
+func Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprint(a ...interface{}) string {
+ return fmt.Sprint(convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(convertArgs(a)...)
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a default spew Formatter interface.
+func convertArgs(args []interface{}) (formatters []interface{}) {
+ formatters = make([]interface{}, len(args))
+ for index, arg := range args {
+ formatters[index] = NewFormatter(arg)
+ }
+ return formatters
+}
diff --git a/vendor/github.com/felixge/httpsnoop/.gitignore b/vendor/github.com/felixge/httpsnoop/.gitignore
new file mode 100644
index 00000000..e69de29b
diff --git a/vendor/github.com/felixge/httpsnoop/.travis.yml b/vendor/github.com/felixge/httpsnoop/.travis.yml
new file mode 100644
index 00000000..bfc42120
--- /dev/null
+++ b/vendor/github.com/felixge/httpsnoop/.travis.yml
@@ -0,0 +1,6 @@
+language: go
+
+go:
+ - 1.6
+ - 1.7
+ - 1.8
diff --git a/vendor/github.com/felixge/httpsnoop/LICENSE.txt b/vendor/github.com/felixge/httpsnoop/LICENSE.txt
new file mode 100644
index 00000000..e028b46a
--- /dev/null
+++ b/vendor/github.com/felixge/httpsnoop/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (c) 2016 Felix Geisendörfer (felix@debuggable.com)
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
diff --git a/vendor/github.com/felixge/httpsnoop/Makefile b/vendor/github.com/felixge/httpsnoop/Makefile
new file mode 100644
index 00000000..2d84889a
--- /dev/null
+++ b/vendor/github.com/felixge/httpsnoop/Makefile
@@ -0,0 +1,10 @@
+.PHONY: ci generate clean
+
+ci: clean generate
+ go test -v ./...
+
+generate:
+ go generate .
+
+clean:
+ rm -rf *_generated*.go
diff --git a/vendor/github.com/felixge/httpsnoop/README.md b/vendor/github.com/felixge/httpsnoop/README.md
new file mode 100644
index 00000000..ae44137e
--- /dev/null
+++ b/vendor/github.com/felixge/httpsnoop/README.md
@@ -0,0 +1,94 @@
+# httpsnoop
+
+Package httpsnoop provides an easy way to capture http related metrics (i.e.
+response time, bytes written, and http status code) from your application's
+http.Handlers.
+
+Doing this requires non-trivial wrapping of the http.ResponseWriter interface,
+which is also exposed for users interested in a more low-level API.
+
+[GoDoc](https://godoc.org/github.com/felixge/httpsnoop)
+[Build Status](https://travis-ci.org/felixge/httpsnoop)
+
+## Usage Example
+
+```go
+// myH is your app's http handler, perhaps a http.ServeMux or similar.
+var myH http.Handler
+// wrappedH wraps myH in order to log every request.
+wrappedH := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ m := httpsnoop.CaptureMetrics(myH, w, r)
+ log.Printf(
+ "%s %s (code=%d dt=%s written=%d)",
+ r.Method,
+ r.URL,
+ m.Code,
+ m.Duration,
+ m.Written,
+ )
+})
+http.ListenAndServe(":8080", wrappedH)
+```
+
+## Why this package exists
+
+Instrumenting an application's http.Handler is surprisingly difficult.
+
+However, if you google for e.g. "capture ResponseWriter status code" you'll find
+lots of advice and code examples that suggest it is a fairly trivial
+undertaking. Unfortunately, everything I've seen so far has a high chance of
+breaking your application.
+
+The main problem is that a `http.ResponseWriter` often implements additional
+interfaces such as `http.Flusher`, `http.CloseNotifier`, `http.Hijacker`, `http.Pusher`, and
+`io.ReaderFrom`. So the naive approach of just wrapping `http.ResponseWriter`
+in your own struct that also implements the `http.ResponseWriter` interface
+will hide the additional interfaces mentioned above. This has a high chance of
+introducing subtle bugs into any non-trivial application.
+
+Another approach I've seen people take is to return a struct that implements
+all of the interfaces above. However, that's also problematic, because it's
+difficult to fake some of these interfaces' behaviors when the underlying
+`http.ResponseWriter` doesn't have an implementation. It's also dangerous,
+because an application may choose to operate differently, merely because it
+detects the presence of these additional interfaces.
+
+This package solves this problem by checking which additional interfaces a
+`http.ResponseWriter` implements, returning a wrapped version implementing the
+exact same set of interfaces.
+
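+For illustration, here is a rough sketch of using the lower-level `Wrap` and
+`Hooks` API directly, hooking only `WriteHeader` (variable names follow the
+usage example above):
+
+```go
+// Record the status code by hooking WriteHeader; all other methods and
+// optional interfaces of w are passed through unchanged.
+var code int
+hooks := httpsnoop.Hooks{
+	WriteHeader: func(next httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc {
+		return func(c int) {
+			code = c
+			next(c)
+		}
+	},
+}
+wrapped := httpsnoop.Wrap(w, hooks)
+myH.ServeHTTP(wrapped, r)
+```
+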
+Additionally this package properly handles edge cases such as `WriteHeader` not
+being called, or called more than once, as well as concurrent calls to
+`http.ResponseWriter` methods, and even calls happening after the wrapped
+`ServeHTTP` has already returned.
+
+Unfortunately this package is not perfect either. It's possible that it is
+still missing some interfaces provided by the go core (let me know if you find
+one), and it won't work for applications adding their own interfaces into the
+mix.
+
+However, hopefully the explanation above has sufficiently scared you away from
+rolling your own solution to this problem. httpsnoop may still break your
+application, but at least it tries to avoid it as much as possible.
+
+Anyway, the real problem here is that smuggling additional interfaces inside
+`http.ResponseWriter` is a problematic design choice, but it probably goes as
+deep as the Go language specification itself. But that's okay, I still prefer
+Go over the alternatives ;).
+
+## Performance
+
+```
+BenchmarkBaseline-8 20000 94912 ns/op
+BenchmarkCaptureMetrics-8 20000 95461 ns/op
+```
+
+As you can see, using `CaptureMetrics` on a vanilla http.Handler introduces an
+overhead of ~500 ns per http request on my machine. However, the margin of
+error appears to be larger than that, so it is reasonable to assume that the
+overhead introduced by `CaptureMetrics` is negligible.
+
+## License
+
+MIT
diff --git a/vendor/github.com/felixge/httpsnoop/capture_metrics.go b/vendor/github.com/felixge/httpsnoop/capture_metrics.go
new file mode 100644
index 00000000..4c45b1a8
--- /dev/null
+++ b/vendor/github.com/felixge/httpsnoop/capture_metrics.go
@@ -0,0 +1,84 @@
+package httpsnoop
+
+import (
+ "io"
+ "net/http"
+ "sync"
+ "time"
+)
+
+// Metrics holds metrics captured from CaptureMetrics.
+type Metrics struct {
+ // Code is the first http response code passed to the WriteHeader func of
+ // the ResponseWriter. If no such call is made, a default code of 200 is
+ // assumed instead.
+ Code int
+ // Duration is the time it took to execute the handler.
+ Duration time.Duration
+ // Written is the number of bytes successfully written by the Write or
+ // ReadFrom function of the ResponseWriter. ResponseWriters may also write
+	// data to their underlying connection directly (e.g. headers), but those
+	// are not tracked. Therefore the number of Written bytes will usually match
+ // the size of the response body.
+ Written int64
+}
+
+// CaptureMetrics wraps the given hnd, executes it with the given w and r, and
+// returns the metrics it captured from it.
+func CaptureMetrics(hnd http.Handler, w http.ResponseWriter, r *http.Request) Metrics {
+ return CaptureMetricsFn(w, func(ww http.ResponseWriter) {
+ hnd.ServeHTTP(ww, r)
+ })
+}
+
+// CaptureMetricsFn wraps w and calls fn with the wrapped w and returns the
+// resulting metrics. This is very similar to CaptureMetrics (which is just
+// sugar on top of this func), but is a more usable interface if your
+// application doesn't use the Go http.Handler interface.
+func CaptureMetricsFn(w http.ResponseWriter, fn func(http.ResponseWriter)) Metrics {
+ var (
+ start = time.Now()
+ m = Metrics{Code: http.StatusOK}
+ headerWritten bool
+ lock sync.Mutex
+ hooks = Hooks{
+ WriteHeader: func(next WriteHeaderFunc) WriteHeaderFunc {
+ return func(code int) {
+ next(code)
+ lock.Lock()
+ defer lock.Unlock()
+ if !headerWritten {
+ m.Code = code
+ headerWritten = true
+ }
+ }
+ },
+
+ Write: func(next WriteFunc) WriteFunc {
+ return func(p []byte) (int, error) {
+ n, err := next(p)
+ lock.Lock()
+ defer lock.Unlock()
+ m.Written += int64(n)
+ headerWritten = true
+ return n, err
+ }
+ },
+
+ ReadFrom: func(next ReadFromFunc) ReadFromFunc {
+ return func(src io.Reader) (int64, error) {
+ n, err := next(src)
+ lock.Lock()
+ defer lock.Unlock()
+ headerWritten = true
+ m.Written += n
+ return n, err
+ }
+ },
+ }
+ )
+
+ fn(Wrap(w, hooks))
+ m.Duration = time.Since(start)
+ return m
+}
diff --git a/vendor/github.com/felixge/httpsnoop/docs.go b/vendor/github.com/felixge/httpsnoop/docs.go
new file mode 100644
index 00000000..203c35b3
--- /dev/null
+++ b/vendor/github.com/felixge/httpsnoop/docs.go
@@ -0,0 +1,10 @@
+// Package httpsnoop provides an easy way to capture http related metrics (i.e.
+// response time, bytes written, and http status code) from your application's
+// http.Handlers.
+//
+// Doing this requires non-trivial wrapping of the http.ResponseWriter
+// interface, which is also exposed for users interested in a more low-level
+// API.
+package httpsnoop
+
+//go:generate go run codegen/main.go
diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go
new file mode 100644
index 00000000..41a20da9
--- /dev/null
+++ b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go
@@ -0,0 +1,385 @@
+// +build go1.8
+// Code generated by "httpsnoop/codegen"; DO NOT EDIT
+
+package httpsnoop
+
+import (
+ "bufio"
+ "io"
+ "net"
+ "net/http"
+)
+
+// HeaderFunc is part of the http.ResponseWriter interface.
+type HeaderFunc func() http.Header
+
+// WriteHeaderFunc is part of the http.ResponseWriter interface.
+type WriteHeaderFunc func(code int)
+
+// WriteFunc is part of the http.ResponseWriter interface.
+type WriteFunc func(b []byte) (int, error)
+
+// FlushFunc is part of the http.Flusher interface.
+type FlushFunc func()
+
+// CloseNotifyFunc is part of the http.CloseNotifier interface.
+type CloseNotifyFunc func() <-chan bool
+
+// HijackFunc is part of the http.Hijacker interface.
+type HijackFunc func() (net.Conn, *bufio.ReadWriter, error)
+
+// ReadFromFunc is part of the io.ReaderFrom interface.
+type ReadFromFunc func(src io.Reader) (int64, error)
+
+// PushFunc is part of the http.Pusher interface.
+type PushFunc func(target string, opts *http.PushOptions) error
+
+// Hooks defines a set of method interceptors for methods included in
+// http.ResponseWriter as well as some others. You can think of them as
+// middleware for the function calls they target. See Wrap for more details.
+type Hooks struct {
+ Header func(HeaderFunc) HeaderFunc
+ WriteHeader func(WriteHeaderFunc) WriteHeaderFunc
+ Write func(WriteFunc) WriteFunc
+ Flush func(FlushFunc) FlushFunc
+ CloseNotify func(CloseNotifyFunc) CloseNotifyFunc
+ Hijack func(HijackFunc) HijackFunc
+ ReadFrom func(ReadFromFunc) ReadFromFunc
+ Push func(PushFunc) PushFunc
+}
+
+// Wrap returns a wrapped version of w that provides the exact same interface
+// as w. Specifically if w implements any combination of:
+//
+// - http.Flusher
+// - http.CloseNotifier
+// - http.Hijacker
+// - io.ReaderFrom
+// - http.Pusher
+//
+// The wrapped version will implement the exact same combination. If no hooks
+// are set, the wrapped version also behaves exactly as w. Hooks targeting
+// methods not supported by w are ignored. Any other hooks will intercept the
+// method they target and may modify the call's arguments and/or return values.
+// The CaptureMetrics implementation serves as a working example for how the
+// hooks can be used.
+func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter {
+ rw := &rw{w: w, h: hooks}
+ _, i0 := w.(http.Flusher)
+ _, i1 := w.(http.CloseNotifier)
+ _, i2 := w.(http.Hijacker)
+ _, i3 := w.(io.ReaderFrom)
+ _, i4 := w.(http.Pusher)
+ switch {
+ // combination 1/32
+ case !i0 && !i1 && !i2 && !i3 && !i4:
+ return struct {
+ http.ResponseWriter
+ }{rw}
+ // combination 2/32
+ case !i0 && !i1 && !i2 && !i3 && i4:
+ return struct {
+ http.ResponseWriter
+ http.Pusher
+ }{rw, rw}
+ // combination 3/32
+ case !i0 && !i1 && !i2 && i3 && !i4:
+ return struct {
+ http.ResponseWriter
+ io.ReaderFrom
+ }{rw, rw}
+ // combination 4/32
+ case !i0 && !i1 && !i2 && i3 && i4:
+ return struct {
+ http.ResponseWriter
+ io.ReaderFrom
+ http.Pusher
+ }{rw, rw, rw}
+ // combination 5/32
+ case !i0 && !i1 && i2 && !i3 && !i4:
+ return struct {
+ http.ResponseWriter
+ http.Hijacker
+ }{rw, rw}
+ // combination 6/32
+ case !i0 && !i1 && i2 && !i3 && i4:
+ return struct {
+ http.ResponseWriter
+ http.Hijacker
+ http.Pusher
+ }{rw, rw, rw}
+ // combination 7/32
+ case !i0 && !i1 && i2 && i3 && !i4:
+ return struct {
+ http.ResponseWriter
+ http.Hijacker
+ io.ReaderFrom
+ }{rw, rw, rw}
+ // combination 8/32
+ case !i0 && !i1 && i2 && i3 && i4:
+ return struct {
+ http.ResponseWriter
+ http.Hijacker
+ io.ReaderFrom
+ http.Pusher
+ }{rw, rw, rw, rw}
+ // combination 9/32
+ case !i0 && i1 && !i2 && !i3 && !i4:
+ return struct {
+ http.ResponseWriter
+ http.CloseNotifier
+ }{rw, rw}
+ // combination 10/32
+ case !i0 && i1 && !i2 && !i3 && i4:
+ return struct {
+ http.ResponseWriter
+ http.CloseNotifier
+ http.Pusher
+ }{rw, rw, rw}
+ // combination 11/32
+ case !i0 && i1 && !i2 && i3 && !i4:
+ return struct {
+ http.ResponseWriter
+ http.CloseNotifier
+ io.ReaderFrom
+ }{rw, rw, rw}
+ // combination 12/32
+ case !i0 && i1 && !i2 && i3 && i4:
+ return struct {
+ http.ResponseWriter
+ http.CloseNotifier
+ io.ReaderFrom
+ http.Pusher
+ }{rw, rw, rw, rw}
+ // combination 13/32
+ case !i0 && i1 && i2 && !i3 && !i4:
+ return struct {
+ http.ResponseWriter
+ http.CloseNotifier
+ http.Hijacker
+ }{rw, rw, rw}
+ // combination 14/32
+ case !i0 && i1 && i2 && !i3 && i4:
+ return struct {
+ http.ResponseWriter
+ http.CloseNotifier
+ http.Hijacker
+ http.Pusher
+ }{rw, rw, rw, rw}
+ // combination 15/32
+ case !i0 && i1 && i2 && i3 && !i4:
+ return struct {
+ http.ResponseWriter
+ http.CloseNotifier
+ http.Hijacker
+ io.ReaderFrom
+ }{rw, rw, rw, rw}
+ // combination 16/32
+ case !i0 && i1 && i2 && i3 && i4:
+ return struct {
+ http.ResponseWriter
+ http.CloseNotifier
+ http.Hijacker
+ io.ReaderFrom
+ http.Pusher
+ }{rw, rw, rw, rw, rw}
+ // combination 17/32
+ case i0 && !i1 && !i2 && !i3 && !i4:
+ return struct {
+ http.ResponseWriter
+ http.Flusher
+ }{rw, rw}
+ // combination 18/32
+ case i0 && !i1 && !i2 && !i3 && i4:
+ return struct {
+ http.ResponseWriter
+ http.Flusher
+ http.Pusher
+ }{rw, rw, rw}
+ // combination 19/32
+ case i0 && !i1 && !i2 && i3 && !i4:
+ return struct {
+ http.ResponseWriter
+ http.Flusher
+ io.ReaderFrom
+ }{rw, rw, rw}
+ // combination 20/32
+ case i0 && !i1 && !i2 && i3 && i4:
+ return struct {
+ http.ResponseWriter
+ http.Flusher
+ io.ReaderFrom
+ http.Pusher
+ }{rw, rw, rw, rw}
+ // combination 21/32
+ case i0 && !i1 && i2 && !i3 && !i4:
+ return struct {
+ http.ResponseWriter
+ http.Flusher
+ http.Hijacker
+ }{rw, rw, rw}
+ // combination 22/32
+ case i0 && !i1 && i2 && !i3 && i4:
+ return struct {
+ http.ResponseWriter
+ http.Flusher
+ http.Hijacker
+ http.Pusher
+ }{rw, rw, rw, rw}
+ // combination 23/32
+ case i0 && !i1 && i2 && i3 && !i4:
+ return struct {
+ http.ResponseWriter
+ http.Flusher
+ http.Hijacker
+ io.ReaderFrom
+ }{rw, rw, rw, rw}
+ // combination 24/32
+ case i0 && !i1 && i2 && i3 && i4:
+ return struct {
+ http.ResponseWriter
+ http.Flusher
+ http.Hijacker
+ io.ReaderFrom
+ http.Pusher
+ }{rw, rw, rw, rw, rw}
+ // combination 25/32
+ case i0 && i1 && !i2 && !i3 && !i4:
+ return struct {
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ }{rw, rw, rw}
+ // combination 26/32
+ case i0 && i1 && !i2 && !i3 && i4:
+ return struct {
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ http.Pusher
+ }{rw, rw, rw, rw}
+ // combination 27/32
+ case i0 && i1 && !i2 && i3 && !i4:
+ return struct {
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ io.ReaderFrom
+ }{rw, rw, rw, rw}
+ // combination 28/32
+ case i0 && i1 && !i2 && i3 && i4:
+ return struct {
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ io.ReaderFrom
+ http.Pusher
+ }{rw, rw, rw, rw, rw}
+ // combination 29/32
+ case i0 && i1 && i2 && !i3 && !i4:
+ return struct {
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ http.Hijacker
+ }{rw, rw, rw, rw}
+ // combination 30/32
+ case i0 && i1 && i2 && !i3 && i4:
+ return struct {
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ http.Hijacker
+ http.Pusher
+ }{rw, rw, rw, rw, rw}
+ // combination 31/32
+ case i0 && i1 && i2 && i3 && !i4:
+ return struct {
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ http.Hijacker
+ io.ReaderFrom
+ }{rw, rw, rw, rw, rw}
+ // combination 32/32
+ case i0 && i1 && i2 && i3 && i4:
+ return struct {
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ http.Hijacker
+ io.ReaderFrom
+ http.Pusher
+ }{rw, rw, rw, rw, rw, rw}
+ }
+ panic("unreachable")
+}
+
+type rw struct {
+ w http.ResponseWriter
+ h Hooks
+}
+
+func (w *rw) Header() http.Header {
+ f := w.w.(http.ResponseWriter).Header
+ if w.h.Header != nil {
+ f = w.h.Header(f)
+ }
+ return f()
+}
+
+func (w *rw) WriteHeader(code int) {
+ f := w.w.(http.ResponseWriter).WriteHeader
+ if w.h.WriteHeader != nil {
+ f = w.h.WriteHeader(f)
+ }
+ f(code)
+}
+
+func (w *rw) Write(b []byte) (int, error) {
+ f := w.w.(http.ResponseWriter).Write
+ if w.h.Write != nil {
+ f = w.h.Write(f)
+ }
+ return f(b)
+}
+
+func (w *rw) Flush() {
+ f := w.w.(http.Flusher).Flush
+ if w.h.Flush != nil {
+ f = w.h.Flush(f)
+ }
+ f()
+}
+
+func (w *rw) CloseNotify() <-chan bool {
+ f := w.w.(http.CloseNotifier).CloseNotify
+ if w.h.CloseNotify != nil {
+ f = w.h.CloseNotify(f)
+ }
+ return f()
+}
+
+func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ f := w.w.(http.Hijacker).Hijack
+ if w.h.Hijack != nil {
+ f = w.h.Hijack(f)
+ }
+ return f()
+}
+
+func (w *rw) ReadFrom(src io.Reader) (int64, error) {
+ f := w.w.(io.ReaderFrom).ReadFrom
+ if w.h.ReadFrom != nil {
+ f = w.h.ReadFrom(f)
+ }
+ return f(src)
+}
+
+func (w *rw) Push(target string, opts *http.PushOptions) error {
+ f := w.w.(http.Pusher).Push
+ if w.h.Push != nil {
+ f = w.h.Push(f)
+ }
+ return f(target, opts)
+}
diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go
new file mode 100644
index 00000000..36bb59b8
--- /dev/null
+++ b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go
@@ -0,0 +1,243 @@
+// +build !go1.8
+// Code generated by "httpsnoop/codegen"; DO NOT EDIT
+
+package httpsnoop
+
+import (
+ "bufio"
+ "io"
+ "net"
+ "net/http"
+)
+
+// HeaderFunc is part of the http.ResponseWriter interface.
+type HeaderFunc func() http.Header
+
+// WriteHeaderFunc is part of the http.ResponseWriter interface.
+type WriteHeaderFunc func(code int)
+
+// WriteFunc is part of the http.ResponseWriter interface.
+type WriteFunc func(b []byte) (int, error)
+
+// FlushFunc is part of the http.Flusher interface.
+type FlushFunc func()
+
+// CloseNotifyFunc is part of the http.CloseNotifier interface.
+type CloseNotifyFunc func() <-chan bool
+
+// HijackFunc is part of the http.Hijacker interface.
+type HijackFunc func() (net.Conn, *bufio.ReadWriter, error)
+
+// ReadFromFunc is part of the io.ReaderFrom interface.
+type ReadFromFunc func(src io.Reader) (int64, error)
+
+// Hooks defines a set of method interceptors for methods included in
+// http.ResponseWriter as well as some others. You can think of them as
+// middleware for the function calls they target. See Wrap for more details.
+type Hooks struct {
+ Header func(HeaderFunc) HeaderFunc
+ WriteHeader func(WriteHeaderFunc) WriteHeaderFunc
+ Write func(WriteFunc) WriteFunc
+ Flush func(FlushFunc) FlushFunc
+ CloseNotify func(CloseNotifyFunc) CloseNotifyFunc
+ Hijack func(HijackFunc) HijackFunc
+ ReadFrom func(ReadFromFunc) ReadFromFunc
+}
+
+// Wrap returns a wrapped version of w that provides the exact same interface
+// as w. Specifically if w implements any combination of:
+//
+// - http.Flusher
+// - http.CloseNotifier
+// - http.Hijacker
+// - io.ReaderFrom
+//
+// The wrapped version will implement the exact same combination. If no hooks
+// are set, the wrapped version also behaves exactly as w. Hooks targeting
+// methods not supported by w are ignored. Any other hooks will intercept the
+// method they target and may modify the call's arguments and/or return values.
+// The CaptureMetrics implementation serves as a working example for how the
+// hooks can be used.
+func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter {
+ rw := &rw{w: w, h: hooks}
+ _, i0 := w.(http.Flusher)
+ _, i1 := w.(http.CloseNotifier)
+ _, i2 := w.(http.Hijacker)
+ _, i3 := w.(io.ReaderFrom)
+ switch {
+ // combination 1/16
+ case !i0 && !i1 && !i2 && !i3:
+ return struct {
+ http.ResponseWriter
+ }{rw}
+ // combination 2/16
+ case !i0 && !i1 && !i2 && i3:
+ return struct {
+ http.ResponseWriter
+ io.ReaderFrom
+ }{rw, rw}
+ // combination 3/16
+ case !i0 && !i1 && i2 && !i3:
+ return struct {
+ http.ResponseWriter
+ http.Hijacker
+ }{rw, rw}
+ // combination 4/16
+ case !i0 && !i1 && i2 && i3:
+ return struct {
+ http.ResponseWriter
+ http.Hijacker
+ io.ReaderFrom
+ }{rw, rw, rw}
+ // combination 5/16
+ case !i0 && i1 && !i2 && !i3:
+ return struct {
+ http.ResponseWriter
+ http.CloseNotifier
+ }{rw, rw}
+ // combination 6/16
+ case !i0 && i1 && !i2 && i3:
+ return struct {
+ http.ResponseWriter
+ http.CloseNotifier
+ io.ReaderFrom
+ }{rw, rw, rw}
+ // combination 7/16
+ case !i0 && i1 && i2 && !i3:
+ return struct {
+ http.ResponseWriter
+ http.CloseNotifier
+ http.Hijacker
+ }{rw, rw, rw}
+ // combination 8/16
+ case !i0 && i1 && i2 && i3:
+ return struct {
+ http.ResponseWriter
+ http.CloseNotifier
+ http.Hijacker
+ io.ReaderFrom
+ }{rw, rw, rw, rw}
+ // combination 9/16
+ case i0 && !i1 && !i2 && !i3:
+ return struct {
+ http.ResponseWriter
+ http.Flusher
+ }{rw, rw}
+ // combination 10/16
+ case i0 && !i1 && !i2 && i3:
+ return struct {
+ http.ResponseWriter
+ http.Flusher
+ io.ReaderFrom
+ }{rw, rw, rw}
+ // combination 11/16
+ case i0 && !i1 && i2 && !i3:
+ return struct {
+ http.ResponseWriter
+ http.Flusher
+ http.Hijacker
+ }{rw, rw, rw}
+ // combination 12/16
+ case i0 && !i1 && i2 && i3:
+ return struct {
+ http.ResponseWriter
+ http.Flusher
+ http.Hijacker
+ io.ReaderFrom
+ }{rw, rw, rw, rw}
+ // combination 13/16
+ case i0 && i1 && !i2 && !i3:
+ return struct {
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ }{rw, rw, rw}
+ // combination 14/16
+ case i0 && i1 && !i2 && i3:
+ return struct {
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ io.ReaderFrom
+ }{rw, rw, rw, rw}
+ // combination 15/16
+ case i0 && i1 && i2 && !i3:
+ return struct {
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ http.Hijacker
+ }{rw, rw, rw, rw}
+ // combination 16/16
+ case i0 && i1 && i2 && i3:
+ return struct {
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ http.Hijacker
+ io.ReaderFrom
+ }{rw, rw, rw, rw, rw}
+ }
+ panic("unreachable")
+}
+
+type rw struct {
+ w http.ResponseWriter
+ h Hooks
+}
+
+func (w *rw) Header() http.Header {
+ f := w.w.(http.ResponseWriter).Header
+ if w.h.Header != nil {
+ f = w.h.Header(f)
+ }
+ return f()
+}
+
+func (w *rw) WriteHeader(code int) {
+ f := w.w.(http.ResponseWriter).WriteHeader
+ if w.h.WriteHeader != nil {
+ f = w.h.WriteHeader(f)
+ }
+ f(code)
+}
+
+func (w *rw) Write(b []byte) (int, error) {
+ f := w.w.(http.ResponseWriter).Write
+ if w.h.Write != nil {
+ f = w.h.Write(f)
+ }
+ return f(b)
+}
+
+func (w *rw) Flush() {
+ f := w.w.(http.Flusher).Flush
+ if w.h.Flush != nil {
+ f = w.h.Flush(f)
+ }
+ f()
+}
+
+func (w *rw) CloseNotify() <-chan bool {
+ f := w.w.(http.CloseNotifier).CloseNotify
+ if w.h.CloseNotify != nil {
+ f = w.h.CloseNotify(f)
+ }
+ return f()
+}
+
+func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ f := w.w.(http.Hijacker).Hijack
+ if w.h.Hijack != nil {
+ f = w.h.Hijack(f)
+ }
+ return f()
+}
+
+func (w *rw) ReadFrom(src io.Reader) (int64, error) {
+ f := w.w.(io.ReaderFrom).ReadFrom
+ if w.h.ReadFrom != nil {
+ f = w.h.ReadFrom(f)
+ }
+ return f(src)
+}
diff --git a/vendor/github.com/flosch/pongo2/.gitattributes b/vendor/github.com/flosch/pongo2/.gitattributes
new file mode 100644
index 00000000..fcadb2cf
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/.gitattributes
@@ -0,0 +1 @@
+* text eol=lf
diff --git a/vendor/github.com/flosch/pongo2/.gitignore b/vendor/github.com/flosch/pongo2/.gitignore
new file mode 100644
index 00000000..1346be55
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/.gitignore
@@ -0,0 +1,41 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+.idea
+.vscode
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+.project
+EBNF.txt
+test1.tpl
+pongo2_internal_test.go
+tpl-error.out
+/count.out
+/cover.out
+*.swp
+*.iml
+/cpu.out
+/mem.out
+/pongo2.test
+*.error
+/profile
+/coverage.out
+/pongo2_internal_test.ignore
diff --git a/vendor/github.com/flosch/pongo2/.travis.yml b/vendor/github.com/flosch/pongo2/.travis.yml
new file mode 100644
index 00000000..e39e5d05
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/.travis.yml
@@ -0,0 +1,8 @@
+language: go
+os:
+ - linux
+ - osx
+go:
+ - 1.12
+script:
+ - go test -v
diff --git a/vendor/github.com/flosch/pongo2/AUTHORS b/vendor/github.com/flosch/pongo2/AUTHORS
new file mode 100644
index 00000000..601697cf
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/AUTHORS
@@ -0,0 +1,11 @@
+Main author and maintainer of pongo2:
+
+* Florian Schlachter
+
+Contributors (in no specific order):
+
+* @romanoaugusto88
+* @vitalbh
+* @blaubaer
+
+Feel free to add yourself to the list or to modify your entry if you did a contribution.
diff --git a/vendor/github.com/flosch/pongo2/LICENSE b/vendor/github.com/flosch/pongo2/LICENSE
new file mode 100644
index 00000000..e876f869
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013-2014 Florian Schlachter
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/flosch/pongo2/README.md b/vendor/github.com/flosch/pongo2/README.md
new file mode 100644
index 00000000..e59694e2
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/README.md
@@ -0,0 +1,167 @@
+# [pongo](https://en.wikipedia.org/wiki/Pongo_%28genus%29)2
+
+[Documentation](https://pkg.go.dev/flosch/pongo2)
+[Build Status](https://travis-ci.org/flosch/pongo2)
+
+pongo2 is a Django-syntax-like templating language.
+
+Install/update using `go get` (no dependencies required by pongo2):
+
+```sh
+go get -u github.com/flosch/pongo2
+```
+
+Please use the [issue tracker](https://github.com/flosch/pongo2/issues) if you're encountering any problems with pongo2 or if you need help with implementing tags or filters ([create a ticket!](https://github.com/flosch/pongo2/issues/new)).
+
+## First impression of a template
+
+```django
+<html>
+  <head>
+    <title>Our admins and users</title>
+  </head>
+  {# This is a short example to give you a quick overview of pongo2's syntax. #}
+  {% macro user_details(user, is_admin=false) %}
+  <div class="user_item">
+    <h2 {% if (user.karma >= 40) || (user.karma > calc_avg_karma(userlist)+5) %} class="karma-good"{%
+    endif %}>
+      {{ user }}
+    </h2>
+
+    <p>This user registered {{ user.register_date|naturaltime }}.</p>
+
+    <p>The user's biography:</p>
+    <p>
+      {{ user.biography|markdown|truncatewords_html:15 }}
+      read more
+    </p>
+
+    {% if is_admin %}
+    <p class="admin">This user is an admin!</p>
+    {% endif %}
+  </div>
+  {% endmacro %}
+
+  <body>
+    <h1>Our admins</h1>
+    {% for admin in adminlist %} {{ user_details(admin, true) }} {% endfor %}
+
+    <h1>Our members</h1>
+    {% for user in userlist %} {{ user_details(user) }} {% endfor %}
+  </body>
+</html>
+```
+
+## Features
+
+- Syntax- and feature-set-compatible with [Django 1.7](https://django.readthedocs.io/en/1.7.x/topics/templates.html)
+- [Advanced C-like expressions](https://github.com/flosch/pongo2/blob/master/template_tests/expressions.tpl).
+- [Complex function calls within expressions](https://github.com/flosch/pongo2/blob/master/template_tests/function_calls_wrapper.tpl).
+- [Easy API to create new filters and tags](http://godoc.org/github.com/flosch/pongo2#RegisterFilter) ([including parsing arguments](http://godoc.org/github.com/flosch/pongo2#Parser)); a short sketch follows this list
+- Additional features:
+ - Macros including importing macros from other files (see [template_tests/macro.tpl](https://github.com/flosch/pongo2/blob/master/template_tests/macro.tpl))
+ - [Template sandboxing](https://godoc.org/github.com/flosch/pongo2#TemplateSet) ([directory patterns](http://golang.org/pkg/path/filepath/#Match), banned tags/filters)
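+
+A minimal sketch of registering a custom filter through that API (the filter
+name and behaviour are made up for illustration):
+
+```go
+func init() {
+	// "shout" upper-cases its input; a filter's parameter arrives via the second argument.
+	pongo2.RegisterFilter("shout", func(in *pongo2.Value, param *pongo2.Value) (*pongo2.Value, *pongo2.Error) {
+		return pongo2.AsValue(strings.ToUpper(in.String()) + "!"), nil
+	})
+}
+```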
+
+## Caveats
+
+### Filters
+
+- **date** / **time**: The `date` and `time` filters currently use Go's time and date format (not Django's). [Take a look at the format here](http://golang.org/pkg/time/#Time.Format); a short sketch follows this list.
+- **stringformat**: `stringformat` does **not** take Python's string format syntax as a parameter, instead it takes Go's. Essentially `{{ 3.14|stringformat:"pi is %.2f" }}` is `fmt.Sprintf("pi is %.2f", 3.14)`.
+- **escape** / **force_escape**: Unlike Django's behaviour, the `escape`-filter is applied immediately. Therefore there is no need for a `force_escape`-filter yet.
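+
+A minimal Go sketch of the `date` filter caveat above (the variable names are
+illustrative):
+
+```go
+tpl := pongo2.Must(pongo2.FromString(`Registered on {{ registered|date:"2006-01-02" }}`))
+out, err := tpl.Execute(pongo2.Context{"registered": time.Now()})
+if err != nil {
+	panic(err)
+}
+fmt.Println(out) // e.g. "Registered on 2022-06-30"
+```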
+
+### Tags
+
+- **for**: All the `forloop` fields (like `forloop.counter`) are written with a capital letter at the beginning. For example, the `counter` can be accessed by `forloop.Counter` and the parentloop by `forloop.Parentloop`.
+- **now**: takes Go's time format (see **date** and **time**-filter).
+
+### Misc
+
+- **not in-operator**: You can check whether a map/struct/string contains a key/field/substring by using the in-operator (or the negation of it):
+ `{% if key in map %}Key is in map{% else %}Key not in map{% endif %}` or `{% if !(key in map) %}Key is NOT in map{% else %}Key is in map{% endif %}`.
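+
+A minimal Go sketch of the in-operator, mirroring the template above (the
+context keys are illustrative):
+
+```go
+tpl := pongo2.Must(pongo2.FromString("{% if key in map %}Key is in map{% else %}Key not in map{% endif %}"))
+out, err := tpl.Execute(pongo2.Context{
+	"key": "name",
+	"map": map[string]string{"name": "florian"},
+})
+if err != nil {
+	panic(err)
+}
+fmt.Println(out) // Output: Key is in map
+```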
+
+## Add-ons, libraries and helpers
+
+### Official
+
+- [pongo2-addons](https://github.com/flosch/pongo2-addons) - Official additional filters/tags for pongo2 (for example a **markdown**-filter). They are in their own repository because they're relying on 3rd-party-libraries.
+
+### 3rd-party
+
+- [beego-pongo2](https://github.com/oal/beego-pongo2) - A tiny little helper for using Pongo2 with [Beego](https://github.com/astaxie/beego).
+- [beego-pongo2.v2](https://github.com/ipfans/beego-pongo2.v2) - Same as `beego-pongo2`, but for pongo2 v2.
+- [macaron-pongo2](https://github.com/macaron-contrib/pongo2) - pongo2 support for [Macaron](https://github.com/Unknwon/macaron), a modular web framework.
+- [ginpongo2](https://github.com/ngerakines/ginpongo2) - middleware for [gin](github.com/gin-gonic/gin) to use pongo2 templates
+- [Build'n support for Iris' template engine](https://github.com/kataras/iris)
+- [pongo2gin](https://gitlab.com/go-box/pongo2gin) - alternative renderer for [gin](github.com/gin-gonic/gin) to use pongo2 templates
+- [pongo2-trans](https://github.com/digitalcrab/pongo2trans) - `trans`-tag implementation for internationalization
+- [tpongo2](https://github.com/tango-contrib/tpongo2) - pongo2 support for [Tango](https://github.com/lunny/tango), a micro-kernel & pluggable web framework.
+- [p2cli](https://github.com/wrouesnel/p2cli) - command line templating utility based on pongo2
+
+Please add your project to this list and send me a pull request when you've developed something nice for pongo2.
+
+## Who's using pongo2
+
+[I'm compiling a list of pongo2 users](https://github.com/flosch/pongo2/issues/241). Add your project or company!
+
+## API-usage examples
+
+Please see the documentation for a full list of provided API methods.
+
+### A tiny example (template string)
+
+```go
+// Compile the template first (i. e. creating the AST)
+tpl, err := pongo2.FromString("Hello {{ name|capfirst }}!")
+if err != nil {
+ panic(err)
+}
+// Now you can render the template with the given
+// pongo2.Context how often you want to.
+out, err := tpl.Execute(pongo2.Context{"name": "florian"})
+if err != nil {
+ panic(err)
+}
+fmt.Println(out) // Output: Hello Florian!
+```
+
+## Example server-usage (template file)
+
+```go
+package main
+
+import (
+ "github.com/flosch/pongo2"
+ "net/http"
+)
+
+// Pre-compiling the templates at application startup using the
+// little Must()-helper function (Must() will panic if FromFile()
+// or FromString() returns an error - that's it).
+// It's faster to pre-compile it anywhere at startup and only
+// execute the template later.
+var tplExample = pongo2.Must(pongo2.FromFile("example.html"))
+
+func examplePage(w http.ResponseWriter, r *http.Request) {
+ // Execute the template per HTTP request
+ err := tplExample.ExecuteWriter(pongo2.Context{"query": r.FormValue("query")}, w)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ }
+}
+
+func main() {
+ http.HandleFunc("/", examplePage)
+ http.ListenAndServe(":8080", nil)
+}
+```
diff --git a/vendor/github.com/flosch/pongo2/context.go b/vendor/github.com/flosch/pongo2/context.go
new file mode 100644
index 00000000..dbc5e3e3
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/context.go
@@ -0,0 +1,137 @@
+package pongo2
+
+import (
+	"errors"
+	"fmt"
+	"regexp"
+)
+
+var reIdentifiers = regexp.MustCompile("^[a-zA-Z0-9_]+$")
+
+var autoescape = true
+
+func SetAutoescape(newValue bool) {
+ autoescape = newValue
+}
+
+// A Context type provides constants, variables, instances or functions to a template.
+//
+// pongo2 automatically provides meta-information or functions through the "pongo2"-key.
+// Currently, context["pongo2"] contains the following keys:
+// 1. version: returns the version string
+//
+// Template examples for accessing items from your context:
+// {{ myconstant }}
+// {{ myfunc("test", 42) }}
+// {{ user.name }}
+// {{ pongo2.version }}
+type Context map[string]interface{}
+
+func (c Context) checkForValidIdentifiers() *Error {
+ for k, v := range c {
+ if !reIdentifiers.MatchString(k) {
+ return &Error{
+ Sender: "checkForValidIdentifiers",
+ OrigError: fmt.Errorf("context-key '%s' (value: '%+v') is not a valid identifier", k, v),
+ }
+ }
+ }
+ return nil
+}
+
+// Update updates this context with the key/value-pairs from another context.
+func (c Context) Update(other Context) Context {
+ for k, v := range other {
+ c[k] = v
+ }
+ return c
+}
+
+// ExecutionContext contains all data important for the current rendering state.
+//
+// If you're writing a custom tag, your tag's Execute()-function will
+// have access to the ExecutionContext. This struct stores anything
+// about the current rendering process's Context including
+// the Context provided by the user (field Public).
+// You can safely use the Private context to provide data to the user's
+// template (like a 'forloop'-information). The Shared-context is used
+// to share data between tags. All ExecutionContexts share this context.
+//
+// Please be careful when accessing the Public data.
+// PLEASE DO NOT MODIFY THE PUBLIC CONTEXT (read-only).
+//
+// To create your own execution context within tags, use the
+// NewChildExecutionContext(parent) function.
+type ExecutionContext struct {
+ template *Template
+
+ Autoescape bool
+ Public Context
+ Private Context
+ Shared Context
+}
+
+var pongo2MetaContext = Context{
+ "version": Version,
+}
+
+func newExecutionContext(tpl *Template, ctx Context) *ExecutionContext {
+ privateCtx := make(Context)
+
+ // Make the pongo2-related funcs/vars available to the context
+ privateCtx["pongo2"] = pongo2MetaContext
+
+ return &ExecutionContext{
+ template: tpl,
+
+ Public: ctx,
+ Private: privateCtx,
+ Autoescape: autoescape,
+ }
+}
+
+func NewChildExecutionContext(parent *ExecutionContext) *ExecutionContext {
+ newctx := &ExecutionContext{
+ template: parent.template,
+
+ Public: parent.Public,
+ Private: make(Context),
+ Autoescape: parent.Autoescape,
+ }
+ newctx.Shared = parent.Shared
+
+ // Copy all existing private items
+ newctx.Private.Update(parent.Private)
+
+ return newctx
+}
+
+func (ctx *ExecutionContext) Error(msg string, token *Token) *Error {
+ return ctx.OrigError(errors.New(msg), token)
+}
+
+func (ctx *ExecutionContext) OrigError(err error, token *Token) *Error {
+ filename := ctx.template.name
+ var line, col int
+ if token != nil {
+		// A token is available, so take the position information from it
+		// TODO: Add location for the no-token case (from where?)
+ filename = token.Filename
+ line = token.Line
+ col = token.Col
+ }
+ return &Error{
+ Template: ctx.template,
+ Filename: filename,
+ Line: line,
+ Column: col,
+ Token: token,
+ Sender: "execution",
+ OrigError: err,
+ }
+}
+
+func (ctx *ExecutionContext) Logf(format string, args ...interface{}) {
+ ctx.template.set.logf(format, args...)
+}
diff --git a/vendor/github.com/flosch/pongo2/doc.go b/vendor/github.com/flosch/pongo2/doc.go
new file mode 100644
index 00000000..5a23e2b2
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/doc.go
@@ -0,0 +1,31 @@
+// A Django-syntax like template-engine
+//
+// Blog posts about pongo2 (including introduction and migration):
+// https://www.florian-schlachter.de/?tag=pongo2
+//
+// Complete documentation on the template language:
+// https://docs.djangoproject.com/en/dev/topics/templates/
+//
+// Try out pongo2 live in the pongo2 playground:
+// https://www.florian-schlachter.de/pongo2/
+//
+// Make sure to read README.md in the repository as well.
+//
+// A tiny example with template strings:
+//
+// (Snippet on playground: https://www.florian-schlachter.de/pongo2/?id=1206546277)
+//
+// // Compile the template first (i. e. creating the AST)
+// tpl, err := pongo2.FromString("Hello {{ name|capfirst }}!")
+// if err != nil {
+// panic(err)
+// }
+// // Now you can render the template with the given
+// // pongo2.Context how often you want to.
+// out, err := tpl.Execute(pongo2.Context{"name": "fred"})
+// if err != nil {
+// panic(err)
+// }
+// fmt.Println(out) // Output: Hello Fred!
+//
+package pongo2
diff --git a/vendor/github.com/flosch/pongo2/error.go b/vendor/github.com/flosch/pongo2/error.go
new file mode 100644
index 00000000..8aec8c10
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/error.go
@@ -0,0 +1,91 @@
+package pongo2
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+)
+
+// The Error type is used to describe an error during lexing, parsing or
+// execution. If you want to return an error object (for example in your own
+// tag or filter), fill this object with as much information as you have.
+// Make sure "Sender" is always given (if you're returning an error within
+// a filter, set Sender to 'filter:yourfilter'; same goes for tags: 'tag:mytag').
+// It's okay to only fill in OrigError if you don't have any other details at hand.
+type Error struct {
+ Template *Template
+ Filename string
+ Line int
+ Column int
+ Token *Token
+ Sender string
+ OrigError error
+}
+
+func (e *Error) updateFromTokenIfNeeded(template *Template, t *Token) *Error {
+ if e.Template == nil {
+ e.Template = template
+ }
+
+ if e.Token == nil {
+ e.Token = t
+ if e.Line <= 0 {
+ e.Line = t.Line
+ e.Column = t.Col
+ }
+ }
+
+ return e
+}
+
+// Error returns a nicely formatted error string.
+func (e *Error) Error() string {
+ s := "[Error"
+ if e.Sender != "" {
+ s += " (where: " + e.Sender + ")"
+ }
+ if e.Filename != "" {
+ s += " in " + e.Filename
+ }
+ if e.Line > 0 {
+ s += fmt.Sprintf(" | Line %d Col %d", e.Line, e.Column)
+ if e.Token != nil {
+ s += fmt.Sprintf(" near '%s'", e.Token.Val)
+ }
+ }
+ s += "] "
+ s += e.OrigError.Error()
+ return s
+}
+
+// RawLine returns the affected line from the original template, if available.
+func (e *Error) RawLine() (line string, available bool, outErr error) {
+ if e.Line <= 0 || e.Filename == "" {
+ return "", false, nil
+ }
+
+ filename := e.Filename
+ if e.Template != nil {
+ filename = e.Template.set.resolveFilename(e.Template, e.Filename)
+ }
+ file, err := os.Open(filename)
+ if err != nil {
+ return "", false, err
+ }
+ defer func() {
+ err := file.Close()
+ if err != nil && outErr == nil {
+ outErr = err
+ }
+ }()
+
+ scanner := bufio.NewScanner(file)
+ l := 0
+ for scanner.Scan() {
+ l++
+ if l == e.Line {
+ return scanner.Text(), true, nil
+ }
+ }
+ return "", false, nil
+}
diff --git a/vendor/github.com/flosch/pongo2/filters.go b/vendor/github.com/flosch/pongo2/filters.go
new file mode 100644
index 00000000..8d4c89e2
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/filters.go
@@ -0,0 +1,141 @@
+package pongo2
+
+import (
+ "fmt"
+)
+
+// FilterFunction is the type filter functions must fulfil
+type FilterFunction func(in *Value, param *Value) (out *Value, err *Error)
+
+var filters map[string]FilterFunction
+
+func init() {
+ filters = make(map[string]FilterFunction)
+}
+
+// FilterExists returns true if the given filter is already registered
+func FilterExists(name string) bool {
+ _, existing := filters[name]
+ return existing
+}
+
+// RegisterFilter registers a new filter. If there's already a filter with the same
+// name, RegisterFilter returns an error. You usually want to call this
+// function in the filter's init() function:
+// http://golang.org/doc/effective_go.html#init
+//
+// See http://www.florian-schlachter.de/post/pongo2/ for more about
+// writing filters and tags.
+func RegisterFilter(name string, fn FilterFunction) error {
+ if FilterExists(name) {
+ return fmt.Errorf("filter with name '%s' is already registered", name)
+ }
+ filters[name] = fn
+ return nil
+}
+
+// ReplaceFilter replaces an already registered filter with a new implementation. Use this
+// function with caution since it allows you to change existing filter behaviour.
+func ReplaceFilter(name string, fn FilterFunction) error {
+ if !FilterExists(name) {
+ return fmt.Errorf("filter with name '%s' does not exist (therefore cannot be overridden)", name)
+ }
+ filters[name] = fn
+ return nil
+}
+
+// MustApplyFilter behaves like ApplyFilter, but panics on an error.
+func MustApplyFilter(name string, value *Value, param *Value) *Value {
+ val, err := ApplyFilter(name, value, param)
+ if err != nil {
+ panic(err)
+ }
+ return val
+}
+
+// ApplyFilter applies a filter to a given value using the given parameters.
+// Returns a *pongo2.Value or an error.
+func ApplyFilter(name string, value *Value, param *Value) (*Value, *Error) {
+ fn, existing := filters[name]
+ if !existing {
+ return nil, &Error{
+ Sender: "applyfilter",
+ OrigError: fmt.Errorf("Filter with name '%s' not found.", name),
+ }
+ }
+
+ // Make sure param is a *Value
+ if param == nil {
+ param = AsValue(nil)
+ }
+
+ return fn(value, param)
+}
+
+type filterCall struct {
+ token *Token
+
+ name string
+ parameter IEvaluator
+
+ filterFunc FilterFunction
+}
+
+func (fc *filterCall) Execute(v *Value, ctx *ExecutionContext) (*Value, *Error) {
+ var param *Value
+ var err *Error
+
+ if fc.parameter != nil {
+ param, err = fc.parameter.Evaluate(ctx)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ param = AsValue(nil)
+ }
+
+ filteredValue, err := fc.filterFunc(v, param)
+ if err != nil {
+ return nil, err.updateFromTokenIfNeeded(ctx.template, fc.token)
+ }
+ return filteredValue, nil
+}
+
+// Filter = IDENT | IDENT ":" FilterArg | IDENT "|" Filter
+func (p *Parser) parseFilter() (*filterCall, *Error) {
+ identToken := p.MatchType(TokenIdentifier)
+
+ // Check filter ident
+ if identToken == nil {
+ return nil, p.Error("Filter name must be an identifier.", nil)
+ }
+
+ filter := &filterCall{
+ token: identToken,
+ name: identToken.Val,
+ }
+
+ // Get the appropriate filter function and bind it
+ filterFn, exists := filters[identToken.Val]
+ if !exists {
+ return nil, p.Error(fmt.Sprintf("Filter '%s' does not exist.", identToken.Val), identToken)
+ }
+
+ filter.filterFunc = filterFn
+
+ // Check for filter-argument (2 tokens needed: ':' ARG)
+ if p.Match(TokenSymbol, ":") != nil {
+ if p.Peek(TokenSymbol, "}}") != nil {
+ return nil, p.Error("Filter parameter required after ':'.", nil)
+ }
+
+ // Get filter argument expression
+ v, err := p.parseVariableOrLiteral()
+ if err != nil {
+ return nil, err
+ }
+ filter.parameter = v
+ }
+
+ return filter, nil
+}
diff --git a/vendor/github.com/flosch/pongo2/filters_builtin.go b/vendor/github.com/flosch/pongo2/filters_builtin.go
new file mode 100644
index 00000000..c0ec6161
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/filters_builtin.go
@@ -0,0 +1,927 @@
+package pongo2
+
+/* Filters that are provided through github.com/flosch/pongo2-addons:
+ ------------------------------------------------------------------
+
+ filesizeformat
+ slugify
+ timesince
+ timeuntil
+
+ Filters that won't be added:
+ ----------------------------
+
+ get_static_prefix (reason: web-framework specific)
+ pprint (reason: python-specific)
+ static (reason: web-framework specific)
+
+ Reconsideration (not implemented yet):
+ --------------------------------------
+
+ force_escape (reason: not yet needed since this is the behaviour of pongo2's escape filter)
+ safeseq (reason: same reason as `force_escape`)
+ unordered_list (python-specific; not sure whether needed or not)
+ dictsort (python-specific; maybe one could add a filter to sort a list of structs by a specific field name)
+ dictsortreversed (see dictsort)
+*/
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"math/rand"
+	"net/url"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+	"unicode/utf8"
+)
+
+func init() {
+ rand.Seed(time.Now().Unix())
+
+ RegisterFilter("escape", filterEscape)
+ RegisterFilter("safe", filterSafe)
+ RegisterFilter("escapejs", filterEscapejs)
+
+ RegisterFilter("add", filterAdd)
+ RegisterFilter("addslashes", filterAddslashes)
+ RegisterFilter("capfirst", filterCapfirst)
+ RegisterFilter("center", filterCenter)
+ RegisterFilter("cut", filterCut)
+ RegisterFilter("date", filterDate)
+ RegisterFilter("default", filterDefault)
+ RegisterFilter("default_if_none", filterDefaultIfNone)
+ RegisterFilter("divisibleby", filterDivisibleby)
+ RegisterFilter("first", filterFirst)
+ RegisterFilter("floatformat", filterFloatformat)
+ RegisterFilter("get_digit", filterGetdigit)
+ RegisterFilter("iriencode", filterIriencode)
+ RegisterFilter("join", filterJoin)
+ RegisterFilter("last", filterLast)
+ RegisterFilter("length", filterLength)
+ RegisterFilter("length_is", filterLengthis)
+ RegisterFilter("linebreaks", filterLinebreaks)
+ RegisterFilter("linebreaksbr", filterLinebreaksbr)
+ RegisterFilter("linenumbers", filterLinenumbers)
+ RegisterFilter("ljust", filterLjust)
+ RegisterFilter("lower", filterLower)
+ RegisterFilter("make_list", filterMakelist)
+ RegisterFilter("phone2numeric", filterPhone2numeric)
+ RegisterFilter("pluralize", filterPluralize)
+ RegisterFilter("random", filterRandom)
+ RegisterFilter("removetags", filterRemovetags)
+ RegisterFilter("rjust", filterRjust)
+ RegisterFilter("slice", filterSlice)
+ RegisterFilter("split", filterSplit)
+ RegisterFilter("stringformat", filterStringformat)
+ RegisterFilter("striptags", filterStriptags)
+ RegisterFilter("time", filterDate) // time uses filterDate (same golang-format)
+ RegisterFilter("title", filterTitle)
+ RegisterFilter("truncatechars", filterTruncatechars)
+ RegisterFilter("truncatechars_html", filterTruncatecharsHTML)
+ RegisterFilter("truncatewords", filterTruncatewords)
+ RegisterFilter("truncatewords_html", filterTruncatewordsHTML)
+ RegisterFilter("upper", filterUpper)
+ RegisterFilter("urlencode", filterUrlencode)
+ RegisterFilter("urlize", filterUrlize)
+ RegisterFilter("urlizetrunc", filterUrlizetrunc)
+ RegisterFilter("wordcount", filterWordcount)
+ RegisterFilter("wordwrap", filterWordwrap)
+ RegisterFilter("yesno", filterYesno)
+
+ RegisterFilter("float", filterFloat) // pongo-specific
+ RegisterFilter("integer", filterInteger) // pongo-specific
+}
+
+func filterTruncatecharsHelper(s string, newLen int) string {
+ runes := []rune(s)
+ if newLen < len(runes) {
+ if newLen >= 3 {
+ return fmt.Sprintf("%s...", string(runes[:newLen-3]))
+ }
+ // Not enough space for the ellipsis
+ return string(runes[:newLen])
+ }
+ return string(runes)
+}
+
+func filterTruncateHTMLHelper(value string, newOutput *bytes.Buffer, cond func() bool, fn func(c rune, s int, idx int) int, finalize func()) {
+ vLen := len(value)
+ var tagStack []string
+ idx := 0
+
+ for idx < vLen && !cond() {
+ c, s := utf8.DecodeRuneInString(value[idx:])
+ if c == utf8.RuneError {
+ idx += s
+ continue
+ }
+
+ if c == '<' {
+ newOutput.WriteRune(c)
+ idx += s // consume "<"
+
+ if idx+1 < vLen {
+ if value[idx] == '/' {
+ // Close tag
+
+ newOutput.WriteString("/")
+
+ tag := ""
+ idx++ // consume "/"
+
+ for idx < vLen {
+ c2, size2 := utf8.DecodeRuneInString(value[idx:])
+ if c2 == utf8.RuneError {
+ idx += size2
+ continue
+ }
+
+ // End of tag found
+ if c2 == '>' {
+ idx++ // consume ">"
+ break
+ }
+ tag += string(c2)
+ idx += size2
+ }
+
+ if len(tagStack) > 0 {
+					// Ideally, the close tag is at the top of the tag stack.
+					// In malformed HTML it may not be, so iterate through the stack and remove the tag
+ for i := len(tagStack) - 1; i >= 0; i-- {
+ if tagStack[i] == tag {
+ // Found the tag
+ tagStack[i] = tagStack[len(tagStack)-1]
+ tagStack = tagStack[:len(tagStack)-1]
+ break
+ }
+ }
+ }
+
+ newOutput.WriteString(tag)
+ newOutput.WriteString(">")
+ } else {
+ // Open tag
+
+ tag := ""
+
+ params := false
+ for idx < vLen {
+ c2, size2 := utf8.DecodeRuneInString(value[idx:])
+ if c2 == utf8.RuneError {
+ idx += size2
+ continue
+ }
+
+ newOutput.WriteRune(c2)
+
+ // End of tag found
+ if c2 == '>' {
+ idx++ // consume ">"
+ break
+ }
+
+ if !params {
+ if c2 == ' ' {
+ params = true
+ } else {
+ tag += string(c2)
+ }
+ }
+
+ idx += size2
+ }
+
+ // Add tag to stack
+ tagStack = append(tagStack, tag)
+ }
+ }
+ } else {
+ idx = fn(c, s, idx)
+ }
+ }
+
+ finalize()
+
+ for i := len(tagStack) - 1; i >= 0; i-- {
+ tag := tagStack[i]
+ // Close everything from the regular tag stack
+		newOutput.WriteString(fmt.Sprintf("</%s>", tag))
+ }
+}
+
+func filterTruncatechars(in *Value, param *Value) (*Value, *Error) {
+ s := in.String()
+ newLen := param.Integer()
+ return AsValue(filterTruncatecharsHelper(s, newLen)), nil
+}
+
+func filterTruncatecharsHTML(in *Value, param *Value) (*Value, *Error) {
+ value := in.String()
+ newLen := max(param.Integer()-3, 0)
+
+ newOutput := bytes.NewBuffer(nil)
+
+ textcounter := 0
+
+ filterTruncateHTMLHelper(value, newOutput, func() bool {
+ return textcounter >= newLen
+ }, func(c rune, s int, idx int) int {
+ textcounter++
+ newOutput.WriteRune(c)
+
+ return idx + s
+ }, func() {
+ if textcounter >= newLen && textcounter < len(value) {
+ newOutput.WriteString("...")
+ }
+ })
+
+ return AsSafeValue(newOutput.String()), nil
+}
+
+func filterTruncatewords(in *Value, param *Value) (*Value, *Error) {
+ words := strings.Fields(in.String())
+ n := param.Integer()
+ if n <= 0 {
+ return AsValue(""), nil
+ }
+ nlen := min(len(words), n)
+ out := make([]string, 0, nlen)
+ for i := 0; i < nlen; i++ {
+ out = append(out, words[i])
+ }
+
+ if n < len(words) {
+ out = append(out, "...")
+ }
+
+ return AsValue(strings.Join(out, " ")), nil
+}
+
+func filterTruncatewordsHTML(in *Value, param *Value) (*Value, *Error) {
+ value := in.String()
+ newLen := max(param.Integer(), 0)
+
+ newOutput := bytes.NewBuffer(nil)
+
+ wordcounter := 0
+
+ filterTruncateHTMLHelper(value, newOutput, func() bool {
+ return wordcounter >= newLen
+ }, func(_ rune, _ int, idx int) int {
+ // Get next word
+ wordFound := false
+
+ for idx < len(value) {
+ c2, size2 := utf8.DecodeRuneInString(value[idx:])
+ if c2 == utf8.RuneError {
+ idx += size2
+ continue
+ }
+
+ if c2 == '<' {
+ // HTML tag start, don't consume it
+ return idx
+ }
+
+ newOutput.WriteRune(c2)
+ idx += size2
+
+ if c2 == ' ' || c2 == '.' || c2 == ',' || c2 == ';' {
+ // Word ends here, stop capturing it now
+ break
+ } else {
+ wordFound = true
+ }
+ }
+
+ if wordFound {
+ wordcounter++
+ }
+
+ return idx
+ }, func() {
+ if wordcounter >= newLen {
+ newOutput.WriteString("...")
+ }
+ })
+
+ return AsSafeValue(newOutput.String()), nil
+}
+
+func filterEscape(in *Value, param *Value) (*Value, *Error) {
+	output := strings.Replace(in.String(), "&", "&amp;", -1)
+	output = strings.Replace(output, ">", "&gt;", -1)
+	output = strings.Replace(output, "<", "&lt;", -1)
+	output = strings.Replace(output, "\"", "&quot;", -1)
+	output = strings.Replace(output, "'", "&#39;", -1)
+ return AsValue(output), nil
+}
+
+func filterSafe(in *Value, param *Value) (*Value, *Error) {
+ return in, nil // nothing to do here, just to keep track of the safe application
+}
+
+func filterEscapejs(in *Value, param *Value) (*Value, *Error) {
+ sin := in.String()
+
+ var b bytes.Buffer
+
+ idx := 0
+ for idx < len(sin) {
+ c, size := utf8.DecodeRuneInString(sin[idx:])
+ if c == utf8.RuneError {
+ idx += size
+ continue
+ }
+
+ if c == '\\' {
+ // Escape seq?
+ if idx+1 < len(sin) {
+ switch sin[idx+1] {
+ case 'r':
+ b.WriteString(fmt.Sprintf(`\u%04X`, '\r'))
+ idx += 2
+ continue
+ case 'n':
+ b.WriteString(fmt.Sprintf(`\u%04X`, '\n'))
+ idx += 2
+ continue
+ /*case '\'':
+ b.WriteString(fmt.Sprintf(`\u%04X`, '\''))
+ idx += 2
+ continue
+ case '"':
+ b.WriteString(fmt.Sprintf(`\u%04X`, '"'))
+ idx += 2
+ continue*/
+ }
+ }
+ }
+
+ if (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == ' ' || c == '/' {
+ b.WriteRune(c)
+ } else {
+ b.WriteString(fmt.Sprintf(`\u%04X`, c))
+ }
+
+ idx += size
+ }
+
+ return AsValue(b.String()), nil
+}
+
+func filterAdd(in *Value, param *Value) (*Value, *Error) {
+ if in.IsNumber() && param.IsNumber() {
+ if in.IsFloat() || param.IsFloat() {
+ return AsValue(in.Float() + param.Float()), nil
+ }
+ return AsValue(in.Integer() + param.Integer()), nil
+ }
+ // If in/param is not a number, we're relying on the
+ // Value's String() conversion and just add them both together
+ return AsValue(in.String() + param.String()), nil
+}
+
+func filterAddslashes(in *Value, param *Value) (*Value, *Error) {
+ output := strings.Replace(in.String(), "\\", "\\\\", -1)
+ output = strings.Replace(output, "\"", "\\\"", -1)
+ output = strings.Replace(output, "'", "\\'", -1)
+ return AsValue(output), nil
+}
+
+func filterCut(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(strings.Replace(in.String(), param.String(), "", -1)), nil
+}
+
+func filterLength(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(in.Len()), nil
+}
+
+func filterLengthis(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(in.Len() == param.Integer()), nil
+}
+
+func filterDefault(in *Value, param *Value) (*Value, *Error) {
+ if !in.IsTrue() {
+ return param, nil
+ }
+ return in, nil
+}
+
+func filterDefaultIfNone(in *Value, param *Value) (*Value, *Error) {
+ if in.IsNil() {
+ return param, nil
+ }
+ return in, nil
+}
+
+func filterDivisibleby(in *Value, param *Value) (*Value, *Error) {
+ if param.Integer() == 0 {
+ return AsValue(false), nil
+ }
+ return AsValue(in.Integer()%param.Integer() == 0), nil
+}
+
+func filterFirst(in *Value, param *Value) (*Value, *Error) {
+ if in.CanSlice() && in.Len() > 0 {
+ return in.Index(0), nil
+ }
+ return AsValue(""), nil
+}
+
+func filterFloatformat(in *Value, param *Value) (*Value, *Error) {
+ val := in.Float()
+
+ decimals := -1
+ if !param.IsNil() {
+ // Any argument provided?
+ decimals = param.Integer()
+ }
+
+	// if the argument is not a number (e.g. empty), the default
+	// behaviour is to trim the result
+ trim := !param.IsNumber()
+
+ if decimals <= 0 {
+ // argument is negative or zero, so we
+ // want the output being trimmed
+ decimals = -decimals
+ trim = true
+ }
+
+ if trim {
+ // Remove zeroes
+ if float64(int(val)) == val {
+ return AsValue(in.Integer()), nil
+ }
+ }
+
+ return AsValue(strconv.FormatFloat(val, 'f', decimals, 64)), nil
+}
+
+func filterGetdigit(in *Value, param *Value) (*Value, *Error) {
+ i := param.Integer()
+ l := len(in.String()) // do NOT use in.Len() here!
+ if i <= 0 || i > l {
+ return in, nil
+ }
+ return AsValue(in.String()[l-i] - 48), nil
+}
+
+const filterIRIChars = "/#%[]=:;$&()+,!?*@'~"
+
+func filterIriencode(in *Value, param *Value) (*Value, *Error) {
+ var b bytes.Buffer
+
+ sin := in.String()
+ for _, r := range sin {
+ if strings.IndexRune(filterIRIChars, r) >= 0 {
+ b.WriteRune(r)
+ } else {
+ b.WriteString(url.QueryEscape(string(r)))
+ }
+ }
+
+ return AsValue(b.String()), nil
+}
+
+func filterJoin(in *Value, param *Value) (*Value, *Error) {
+ if !in.CanSlice() {
+ return in, nil
+ }
+ sep := param.String()
+ sl := make([]string, 0, in.Len())
+ for i := 0; i < in.Len(); i++ {
+ sl = append(sl, in.Index(i).String())
+ }
+ return AsValue(strings.Join(sl, sep)), nil
+}
+
+func filterLast(in *Value, param *Value) (*Value, *Error) {
+ if in.CanSlice() && in.Len() > 0 {
+ return in.Index(in.Len() - 1), nil
+ }
+ return AsValue(""), nil
+}
+
+func filterUpper(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(strings.ToUpper(in.String())), nil
+}
+
+func filterLower(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(strings.ToLower(in.String())), nil
+}
+
+func filterMakelist(in *Value, param *Value) (*Value, *Error) {
+ s := in.String()
+ result := make([]string, 0, len(s))
+ for _, c := range s {
+ result = append(result, string(c))
+ }
+ return AsValue(result), nil
+}
+
+func filterCapfirst(in *Value, param *Value) (*Value, *Error) {
+ if in.Len() <= 0 {
+ return AsValue(""), nil
+ }
+ t := in.String()
+ r, size := utf8.DecodeRuneInString(t)
+ return AsValue(strings.ToUpper(string(r)) + t[size:]), nil
+}
+
+func filterCenter(in *Value, param *Value) (*Value, *Error) {
+ width := param.Integer()
+ slen := in.Len()
+ if width <= slen {
+ return in, nil
+ }
+
+ spaces := width - slen
+ left := spaces/2 + spaces%2
+ right := spaces / 2
+
+ return AsValue(fmt.Sprintf("%s%s%s", strings.Repeat(" ", left),
+ in.String(), strings.Repeat(" ", right))), nil
+}
+
+func filterDate(in *Value, param *Value) (*Value, *Error) {
+ t, isTime := in.Interface().(time.Time)
+ if !isTime {
+ return nil, &Error{
+ Sender: "filter:date",
+ OrigError: errors.New("filter input argument must be of type 'time.Time'"),
+ }
+ }
+ return AsValue(t.Format(param.String())), nil
+}
+
+func filterFloat(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(in.Float()), nil
+}
+
+func filterInteger(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(in.Integer()), nil
+}
+
+func filterLinebreaks(in *Value, param *Value) (*Value, *Error) {
+ if in.Len() == 0 {
+ return in, nil
+ }
+
+ var b bytes.Buffer
+
+	// Newline = <br />
+	// Double newline = <p>...</p>
+ lines := strings.Split(in.String(), "\n")
+ lenlines := len(lines)
+
+ opened := false
+
+ for idx, line := range lines {
+
+ if !opened {
+			b.WriteString("<p>")
+ opened = true
+ }
+
+ b.WriteString(line)
+
+ if idx < lenlines-1 && strings.TrimSpace(lines[idx]) != "" {
+ // We've not reached the end
+ if strings.TrimSpace(lines[idx+1]) == "" {
+ // Next line is empty
+ if opened {
+					b.WriteString("</p>")
+ opened = false
+ }
+ } else {
+				b.WriteString("<br />")
+ }
+ }
+ }
+
+ if opened {
+		b.WriteString("</p>")
+ }
+
+ return AsValue(b.String()), nil
+}
+
+func filterSplit(in *Value, param *Value) (*Value, *Error) {
+ chunks := strings.Split(in.String(), param.String())
+
+ return AsValue(chunks), nil
+}
+
+func filterLinebreaksbr(in *Value, param *Value) (*Value, *Error) {
+	return AsValue(strings.Replace(in.String(), "\n", "<br />", -1)), nil
+}
+
+func filterLinenumbers(in *Value, param *Value) (*Value, *Error) {
+ lines := strings.Split(in.String(), "\n")
+ output := make([]string, 0, len(lines))
+ for idx, line := range lines {
+ output = append(output, fmt.Sprintf("%d. %s", idx+1, line))
+ }
+ return AsValue(strings.Join(output, "\n")), nil
+}
+
+func filterLjust(in *Value, param *Value) (*Value, *Error) {
+ times := param.Integer() - in.Len()
+ if times < 0 {
+ times = 0
+ }
+ return AsValue(fmt.Sprintf("%s%s", in.String(), strings.Repeat(" ", times))), nil
+}
+
+func filterUrlencode(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(url.QueryEscape(in.String())), nil
+}
+
+// TODO: This regexp could do some work
+var filterUrlizeURLRegexp = regexp.MustCompile(`((((http|https)://)|www\.|((^|[ ])[0-9A-Za-z_\-]+(\.com|\.net|\.org|\.info|\.biz|\.de))))(?U:.*)([ ]+|$)`)
+var filterUrlizeEmailRegexp = regexp.MustCompile(`(\w+@\w+\.\w{2,4})`)
+
+func filterUrlizeHelper(input string, autoescape bool, trunc int) (string, error) {
+ var soutErr error
+ sout := filterUrlizeURLRegexp.ReplaceAllStringFunc(input, func(raw_url string) string {
+ var prefix string
+ var suffix string
+ if strings.HasPrefix(raw_url, " ") {
+ prefix = " "
+ }
+ if strings.HasSuffix(raw_url, " ") {
+ suffix = " "
+ }
+
+ raw_url = strings.TrimSpace(raw_url)
+
+ t, err := ApplyFilter("iriencode", AsValue(raw_url), nil)
+ if err != nil {
+ soutErr = err
+ return ""
+ }
+ url := t.String()
+
+ if !strings.HasPrefix(url, "http") {
+ url = fmt.Sprintf("http://%s", url)
+ }
+
+ title := raw_url
+
+ if trunc > 3 && len(title) > trunc {
+ title = fmt.Sprintf("%s...", title[:trunc-3])
+ }
+
+ if autoescape {
+ t, err := ApplyFilter("escape", AsValue(title), nil)
+ if err != nil {
+ soutErr = err
+ return ""
+ }
+ title = t.String()
+ }
+
+		return fmt.Sprintf(`%s<a href="%s" rel="nofollow">%s</a>%s`, prefix, url, title, suffix)
+ })
+ if soutErr != nil {
+ return "", soutErr
+ }
+
+ sout = filterUrlizeEmailRegexp.ReplaceAllStringFunc(sout, func(mail string) string {
+ title := mail
+
+ if trunc > 3 && len(title) > trunc {
+ title = fmt.Sprintf("%s...", title[:trunc-3])
+ }
+
+ return fmt.Sprintf(`<a href="mailto:%s">%s</a>`, mail, title)
+ })
+
+ return sout, nil
+}
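+
+// Illustrative behaviour of the helper above (a sketch; the exact whitespace
+// handling depends on filterUrlizeURLRegexp and on the registered "iriencode"
+// and "escape" filters):
+//
+//   filterUrlizeHelper("visit www.example.com", true, -1)
+//     -> `visit <a href="http://www.example.com" rel="nofollow">www.example.com</a>`
+//   filterUrlizeHelper("mail me@example.com", true, -1)
+//     -> `mail <a href="mailto:me@example.com">me@example.com</a>`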
+
+func filterUrlize(in *Value, param *Value) (*Value, *Error) {
+ autoescape := true
+ if param.IsBool() {
+ autoescape = param.Bool()
+ }
+
+ s, err := filterUrlizeHelper(in.String(), autoescape, -1)
+ if err != nil {
+ return nil, &Error{
+ Sender: "filter:urlize",
+ OrigError: err,
+ }
+ }
+
+ return AsValue(s), nil
+}
+
+func filterUrlizetrunc(in *Value, param *Value) (*Value, *Error) {
+ s, err := filterUrlizeHelper(in.String(), true, param.Integer())
+ if err != nil {
+ return nil, &Error{
+ Sender: "filter:urlizetrunc",
+ OrigError: errors.New("you cannot pass more than 2 arguments to filter 'pluralize'"),
+ }
+ }
+ return AsValue(s), nil
+}
+
+func filterStringformat(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(fmt.Sprintf(param.String(), in.Interface())), nil
+}
+
+var reStriptags = regexp.MustCompile("<[^>]*?>")
+
+func filterStriptags(in *Value, param *Value) (*Value, *Error) {
+ s := in.String()
+
+ // Strip all tags
+ s = reStriptags.ReplaceAllString(s, "")
+
+ return AsValue(strings.TrimSpace(s)), nil
+}
+
+// https://en.wikipedia.org/wiki/Phoneword
+var filterPhone2numericMap = map[string]string{
+ "a": "2", "b": "2", "c": "2", "d": "3", "e": "3", "f": "3", "g": "4", "h": "4", "i": "4", "j": "5", "k": "5",
+ "l": "5", "m": "6", "n": "6", "o": "6", "p": "7", "q": "7", "r": "7", "s": "7", "t": "8", "u": "8", "v": "8",
+ "w": "9", "x": "9", "y": "9", "z": "9",
+}
+
+func filterPhone2numeric(in *Value, param *Value) (*Value, *Error) {
+ sin := in.String()
+ for k, v := range filterPhone2numericMap {
+ sin = strings.Replace(sin, k, v, -1)
+ sin = strings.Replace(sin, strings.ToUpper(k), v, -1)
+ }
+ return AsValue(sin), nil
+}
+
+func filterPluralize(in *Value, param *Value) (*Value, *Error) {
+ if in.IsNumber() {
+ // Works only on numbers
+ if param.Len() > 0 {
+ endings := strings.Split(param.String(), ",")
+ if len(endings) > 2 {
+ return nil, &Error{
+ Sender: "filter:pluralize",
+ OrigError: errors.New("you cannot pass more than 2 arguments to filter 'pluralize'"),
+ }
+ }
+ if len(endings) == 1 {
+ // 1 argument
+ if in.Integer() != 1 {
+ return AsValue(endings[0]), nil
+ }
+ } else {
+ if in.Integer() != 1 {
+ // 2 arguments
+ return AsValue(endings[1]), nil
+ }
+ return AsValue(endings[0]), nil
+ }
+ } else {
+ if in.Integer() != 1 {
+ // return default 's'
+ return AsValue("s"), nil
+ }
+ }
+
+ return AsValue(""), nil
+ }
+ return nil, &Error{
+ Sender: "filter:pluralize",
+ OrigError: errors.New("filter 'pluralize' does only work on numbers"),
+ }
+}
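+
+// Illustrative template usage of the pluralize filter implemented above:
+//
+//   You have {{ num }} message{{ num|pluralize }}.   (appends "s" unless num == 1)
+//   {{ num }} walrus{{ num|pluralize:"es" }}         (single custom suffix)
+//   {{ num }} cherr{{ num|pluralize:"y,ies" }}       (singular,plural pair)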
+
+func filterRandom(in *Value, param *Value) (*Value, *Error) {
+ if !in.CanSlice() || in.Len() <= 0 {
+ return in, nil
+ }
+ i := rand.Intn(in.Len())
+ return in.Index(i), nil
+}
+
+func filterRemovetags(in *Value, param *Value) (*Value, *Error) {
+ s := in.String()
+ tags := strings.Split(param.String(), ",")
+
+ // Strip only specific tags
+ for _, tag := range tags {
+ re := regexp.MustCompile(fmt.Sprintf("?%s/?>", tag))
+ s = re.ReplaceAllString(s, "")
+ }
+
+ return AsValue(strings.TrimSpace(s)), nil
+}
+
+func filterRjust(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(fmt.Sprintf(fmt.Sprintf("%%%ds", param.Integer()), in.String())), nil
+}
+
+func filterSlice(in *Value, param *Value) (*Value, *Error) {
+ comp := strings.Split(param.String(), ":")
+ if len(comp) != 2 {
+ return nil, &Error{
+ Sender: "filter:slice",
+ OrigError: errors.New("Slice string must have the format 'from:to' [from/to can be omitted, but the ':' is required]"),
+ }
+ }
+
+ if !in.CanSlice() {
+ return in, nil
+ }
+
+ from := AsValue(comp[0]).Integer()
+ to := in.Len()
+
+ if from > to {
+ from = to
+ }
+
+ vto := AsValue(comp[1]).Integer()
+ if vto >= from && vto <= in.Len() {
+ to = vto
+ }
+
+ return in.Slice(from, to), nil
+}
+
+func filterTitle(in *Value, param *Value) (*Value, *Error) {
+ if !in.IsString() {
+ return AsValue(""), nil
+ }
+ return AsValue(strings.Title(strings.ToLower(in.String()))), nil
+}
+
+func filterWordcount(in *Value, param *Value) (*Value, *Error) {
+ return AsValue(len(strings.Fields(in.String()))), nil
+}
+
+func filterWordwrap(in *Value, param *Value) (*Value, *Error) {
+ words := strings.Fields(in.String())
+ wordsLen := len(words)
+ wrapAt := param.Integer()
+ if wrapAt <= 0 {
+ return in, nil
+ }
+
+ linecount := wordsLen/wrapAt + wordsLen%wrapAt
+ lines := make([]string, 0, linecount)
+ for i := 0; i < linecount; i++ {
+ lines = append(lines, strings.Join(words[wrapAt*i:min(wrapAt*(i+1), wordsLen)], " "))
+ }
+ return AsValue(strings.Join(lines, "\n")), nil
+}
+
+func filterYesno(in *Value, param *Value) (*Value, *Error) {
+ choices := map[int]string{
+ 0: "yes",
+ 1: "no",
+ 2: "maybe",
+ }
+ paramString := param.String()
+ customChoices := strings.Split(paramString, ",")
+ if len(paramString) > 0 {
+ if len(customChoices) > 3 {
+ return nil, &Error{
+ Sender: "filter:yesno",
+ OrigError: fmt.Errorf("You cannot pass more than 3 options to the 'yesno'-filter (got: '%s').", paramString),
+ }
+ }
+ if len(customChoices) < 2 {
+ return nil, &Error{
+ Sender: "filter:yesno",
+ OrigError: fmt.Errorf("You must pass either no or at least 2 arguments to the 'yesno'-filter (got: '%s').", paramString),
+ }
+ }
+
+ // Map to the options now
+ choices[0] = customChoices[0]
+ choices[1] = customChoices[1]
+ if len(customChoices) == 3 {
+ choices[2] = customChoices[2]
+ }
+ }
+
+ // maybe
+ if in.IsNil() {
+ return AsValue(choices[2]), nil
+ }
+
+ // yes
+ if in.IsTrue() {
+ return AsValue(choices[0]), nil
+ }
+
+ // no
+ return AsValue(choices[1]), nil
+}
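+
+// Illustrative template usage of the yesno filter implemented above:
+//
+//   {{ flag|yesno }}                    ("yes", "no", or "maybe" for nil)
+//   {{ flag|yesno:"on,off" }}           (custom true/false strings)
+//   {{ flag|yesno:"on,off,unknown" }}   (third option is used when flag is nil)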
diff --git a/vendor/github.com/flosch/pongo2/helpers.go b/vendor/github.com/flosch/pongo2/helpers.go
new file mode 100644
index 00000000..880dbc04
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/helpers.go
@@ -0,0 +1,15 @@
+package pongo2
+
+func max(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+func min(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
diff --git a/vendor/github.com/flosch/pongo2/lexer.go b/vendor/github.com/flosch/pongo2/lexer.go
new file mode 100644
index 00000000..f1897984
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/lexer.go
@@ -0,0 +1,432 @@
+package pongo2
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+
+ "errors"
+)
+
+const (
+ TokenError = iota
+ EOF
+
+ TokenHTML
+
+ TokenKeyword
+ TokenIdentifier
+ TokenString
+ TokenNumber
+ TokenSymbol
+)
+
+var (
+ tokenSpaceChars = " \n\r\t"
+ tokenIdentifierChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_"
+ tokenIdentifierCharsWithDigits = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789"
+ tokenDigits = "0123456789"
+
+ // Available symbols in pongo2 (within filters/tag)
+ TokenSymbols = []string{
+ // 3-Char symbols
+ "{{-", "-}}", "{%-", "-%}",
+
+ // 2-Char symbols
+ "==", ">=", "<=", "&&", "||", "{{", "}}", "{%", "%}", "!=", "<>",
+
+ // 1-Char symbol
+ "(", ")", "+", "-", "*", "<", ">", "/", "^", ",", ".", "!", "|", ":", "=", "%",
+ }
+
+ // Available keywords in pongo2
+ TokenKeywords = []string{"in", "and", "or", "not", "true", "false", "as", "export"}
+)
+
+type TokenType int
+type Token struct {
+ Filename string
+ Typ TokenType
+ Val string
+ Line int
+ Col int
+ TrimWhitespaces bool
+}
+
+type lexerStateFn func() lexerStateFn
+type lexer struct {
+ name string
+ input string
+ start int // start pos of the item
+ pos int // current pos
+ width int // width of last rune
+ tokens []*Token
+ errored bool
+ startline int
+ startcol int
+ line int
+ col int
+
+ inVerbatim bool
+ verbatimName string
+}
+
+func (t *Token) String() string {
+ val := t.Val
+ if len(val) > 1000 {
+ val = fmt.Sprintf("%s...%s", val[:10], val[len(val)-5:])
+ }
+
+ typ := ""
+ switch t.Typ {
+ case TokenHTML:
+ typ = "HTML"
+ case TokenError:
+ typ = "Error"
+ case TokenIdentifier:
+ typ = "Identifier"
+ case TokenKeyword:
+ typ = "Keyword"
+ case TokenNumber:
+ typ = "Number"
+ case TokenString:
+ typ = "String"
+ case TokenSymbol:
+ typ = "Symbol"
+ default:
+ typ = "Unknown"
+ }
+
+ return fmt.Sprintf("",
+ typ, t.Typ, val, t.Line, t.Col, t.TrimWhitespaces)
+}
+
+func lex(name string, input string) ([]*Token, *Error) {
+ l := &lexer{
+ name: name,
+ input: input,
+ tokens: make([]*Token, 0, 100),
+ line: 1,
+ col: 1,
+ startline: 1,
+ startcol: 1,
+ }
+ l.run()
+ if l.errored {
+ errtoken := l.tokens[len(l.tokens)-1]
+ return nil, &Error{
+ Filename: name,
+ Line: errtoken.Line,
+ Column: errtoken.Col,
+ Sender: "lexer",
+ OrigError: errors.New(errtoken.Val),
+ }
+ }
+ return l.tokens, nil
+}
+
+func (l *lexer) value() string {
+ return l.input[l.start:l.pos]
+}
+
+func (l *lexer) length() int {
+ return l.pos - l.start
+}
+
+func (l *lexer) emit(t TokenType) {
+ tok := &Token{
+ Filename: l.name,
+ Typ: t,
+ Val: l.value(),
+ Line: l.startline,
+ Col: l.startcol,
+ }
+
+ if t == TokenString {
+ // Escape sequence \" in strings
+ tok.Val = strings.Replace(tok.Val, `\"`, `"`, -1)
+ tok.Val = strings.Replace(tok.Val, `\\`, `\`, -1)
+ }
+
+ if t == TokenSymbol && len(tok.Val) == 3 && (strings.HasSuffix(tok.Val, "-") || strings.HasPrefix(tok.Val, "-")) {
+ tok.TrimWhitespaces = true
+ tok.Val = strings.Replace(tok.Val, "-", "", -1)
+ }
+
+ l.tokens = append(l.tokens, tok)
+ l.start = l.pos
+ l.startline = l.line
+ l.startcol = l.col
+}
+
+func (l *lexer) next() rune {
+ if l.pos >= len(l.input) {
+ l.width = 0
+ return EOF
+ }
+ r, w := utf8.DecodeRuneInString(l.input[l.pos:])
+ l.width = w
+ l.pos += l.width
+ l.col += l.width
+ return r
+}
+
+func (l *lexer) backup() {
+ l.pos -= l.width
+ l.col -= l.width
+}
+
+func (l *lexer) peek() rune {
+ r := l.next()
+ l.backup()
+ return r
+}
+
+func (l *lexer) ignore() {
+ l.start = l.pos
+ l.startline = l.line
+ l.startcol = l.col
+}
+
+func (l *lexer) accept(what string) bool {
+ if strings.IndexRune(what, l.next()) >= 0 {
+ return true
+ }
+ l.backup()
+ return false
+}
+
+func (l *lexer) acceptRun(what string) {
+ for strings.IndexRune(what, l.next()) >= 0 {
+ }
+ l.backup()
+}
+
+func (l *lexer) errorf(format string, args ...interface{}) lexerStateFn {
+ t := &Token{
+ Filename: l.name,
+ Typ: TokenError,
+ Val: fmt.Sprintf(format, args...),
+ Line: l.startline,
+ Col: l.startcol,
+ }
+ l.tokens = append(l.tokens, t)
+ l.errored = true
+ l.startline = l.line
+ l.startcol = l.col
+ return nil
+}
+
+func (l *lexer) eof() bool {
+ return l.start >= len(l.input)-1
+}
+
+func (l *lexer) run() {
+ for {
+ // TODO: Support verbatim tag names
+ // https://docs.djangoproject.com/en/dev/ref/templates/builtins/#verbatim
+ if l.inVerbatim {
+ name := l.verbatimName
+ if name != "" {
+ name += " "
+ }
+ if strings.HasPrefix(l.input[l.pos:], fmt.Sprintf("{%% endverbatim %s%%}", name)) { // end verbatim
+ if l.pos > l.start {
+ l.emit(TokenHTML)
+ }
+ w := len("{% endverbatim %}")
+ l.pos += w
+ l.col += w
+ l.ignore()
+ l.inVerbatim = false
+ }
+ } else if strings.HasPrefix(l.input[l.pos:], "{% verbatim %}") { // tag
+ if l.pos > l.start {
+ l.emit(TokenHTML)
+ }
+ l.inVerbatim = true
+ w := len("{% verbatim %}")
+ l.pos += w
+ l.col += w
+ l.ignore()
+ }
+
+ if !l.inVerbatim {
+ // Ignore single-line comments {# ... #}
+ if strings.HasPrefix(l.input[l.pos:], "{#") {
+ if l.pos > l.start {
+ l.emit(TokenHTML)
+ }
+
+ l.pos += 2 // pass '{#'
+ l.col += 2
+
+ for {
+ switch l.peek() {
+ case EOF:
+ l.errorf("Single-line comment not closed.")
+ return
+ case '\n':
+ l.errorf("Newline not permitted in a single-line comment.")
+ return
+ }
+
+ if strings.HasPrefix(l.input[l.pos:], "#}") {
+ l.pos += 2 // pass '#}'
+ l.col += 2
+ break
+ }
+
+ l.next()
+ }
+ l.ignore() // ignore whole comment
+
+ // Comment skipped
+ continue // next token
+ }
+
+ if strings.HasPrefix(l.input[l.pos:], "{{") || // variable
+ strings.HasPrefix(l.input[l.pos:], "{%") { // tag
+ if l.pos > l.start {
+ l.emit(TokenHTML)
+ }
+ l.tokenize()
+ if l.errored {
+ return
+ }
+ continue
+ }
+ }
+
+ switch l.peek() {
+ case '\n':
+ l.line++
+ l.col = 0
+ }
+ if l.next() == EOF {
+ break
+ }
+ }
+
+ if l.pos > l.start {
+ l.emit(TokenHTML)
+ }
+
+ if l.inVerbatim {
+ l.errorf("verbatim-tag not closed, got EOF.")
+ }
+}
+
+func (l *lexer) tokenize() {
+ for state := l.stateCode; state != nil; {
+ state = state()
+ }
+}
+
+func (l *lexer) stateCode() lexerStateFn {
+outer_loop:
+ for {
+ switch {
+ case l.accept(tokenSpaceChars):
+ if l.value() == "\n" {
+ return l.errorf("Newline not allowed within tag/variable.")
+ }
+ l.ignore()
+ continue
+ case l.accept(tokenIdentifierChars):
+ return l.stateIdentifier
+ case l.accept(tokenDigits):
+ return l.stateNumber
+ case l.accept(`"'`):
+ return l.stateString
+ }
+
+ // Check for symbol
+ for _, sym := range TokenSymbols {
+ if strings.HasPrefix(l.input[l.start:], sym) {
+ l.pos += len(sym)
+ l.col += l.length()
+ l.emit(TokenSymbol)
+
+ if sym == "%}" || sym == "-%}" || sym == "}}" || sym == "-}}" {
+ // Tag/variable end, return after emit
+ return nil
+ }
+
+ continue outer_loop
+ }
+ }
+
+ break
+ }
+
+ // Normal shut down
+ return nil
+}
+
+func (l *lexer) stateIdentifier() lexerStateFn {
+ l.acceptRun(tokenIdentifierChars)
+ l.acceptRun(tokenIdentifierCharsWithDigits)
+ for _, kw := range TokenKeywords {
+ if kw == l.value() {
+ l.emit(TokenKeyword)
+ return l.stateCode
+ }
+ }
+ l.emit(TokenIdentifier)
+ return l.stateCode
+}
+
+func (l *lexer) stateNumber() lexerStateFn {
+ l.acceptRun(tokenDigits)
+ if l.accept(tokenIdentifierCharsWithDigits) {
+ // This seems to be an identifier starting with a number.
+ // See https://github.com/flosch/pongo2/issues/151
+ return l.stateIdentifier()
+ }
+ /*
+ Maybe context-sensitive number lexing?
+ * comments.0.Text // first comment
+ * usercomments.1.0 // second user, first comment
+ * if (score >= 8.5) // 8.5 as a number
+
+ if l.peek() == '.' {
+ l.accept(".")
+ if !l.accept(tokenDigits) {
+ return l.errorf("Malformed number.")
+ }
+ l.acceptRun(tokenDigits)
+ }
+ */
+ l.emit(TokenNumber)
+ return l.stateCode
+}
+
+func (l *lexer) stateString() lexerStateFn {
+ quotationMark := l.value()
+ l.ignore()
+ l.startcol-- // we're starting the position at the first "
+ for !l.accept(quotationMark) {
+ switch l.next() {
+ case '\\':
+ // escape sequence
+ switch l.peek() {
+ case '"', '\\':
+ l.next()
+ default:
+ return l.errorf("Unknown escape sequence: \\%c", l.peek())
+ }
+ case EOF:
+ return l.errorf("Unexpected EOF, string not closed.")
+ case '\n':
+ return l.errorf("Newline in string is not allowed.")
+ }
+ }
+ l.backup()
+ l.emit(TokenString)
+
+ l.next()
+ l.ignore()
+
+ return l.stateCode
+}
diff --git a/vendor/github.com/flosch/pongo2/nodes.go b/vendor/github.com/flosch/pongo2/nodes.go
new file mode 100644
index 00000000..5b039cdf
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/nodes.go
@@ -0,0 +1,16 @@
+package pongo2
+
+// The root document
+type nodeDocument struct {
+ Nodes []INode
+}
+
+func (doc *nodeDocument) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ for _, n := range doc.Nodes {
+ err := n.Execute(ctx, writer)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/flosch/pongo2/nodes_html.go b/vendor/github.com/flosch/pongo2/nodes_html.go
new file mode 100644
index 00000000..b980a3a5
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/nodes_html.go
@@ -0,0 +1,23 @@
+package pongo2
+
+import (
+ "strings"
+)
+
+type nodeHTML struct {
+ token *Token
+ trimLeft bool
+ trimRight bool
+}
+
+func (n *nodeHTML) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ res := n.token.Val
+ if n.trimLeft {
+ res = strings.TrimLeft(res, tokenSpaceChars)
+ }
+ if n.trimRight {
+ res = strings.TrimRight(res, tokenSpaceChars)
+ }
+ writer.WriteString(res)
+ return nil
+}
diff --git a/vendor/github.com/flosch/pongo2/nodes_wrapper.go b/vendor/github.com/flosch/pongo2/nodes_wrapper.go
new file mode 100644
index 00000000..d1bcb8d8
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/nodes_wrapper.go
@@ -0,0 +1,16 @@
+package pongo2
+
+type NodeWrapper struct {
+ Endtag string
+ nodes []INode
+}
+
+func (wrapper *NodeWrapper) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ for _, n := range wrapper.nodes {
+ err := n.Execute(ctx, writer)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/flosch/pongo2/options.go b/vendor/github.com/flosch/pongo2/options.go
new file mode 100644
index 00000000..9c39e467
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/options.go
@@ -0,0 +1,26 @@
+package pongo2
+
+// Options allow you to change the behavior of template-engine.
+// You can change the options before calling the Execute method.
+type Options struct {
+ // If this is set to true the first newline after a block is removed (block, not variable tag!). Defaults to false.
+ TrimBlocks bool
+
+ // If this is set to true leading spaces and tabs are stripped from the start of a line to a block. Defaults to false
+ LStripBlocks bool
+}
+
+func newOptions() *Options {
+ return &Options{
+ TrimBlocks: false,
+ LStripBlocks: false,
+ }
+}
+
+// Update updates this options from another options.
+func (opt *Options) Update(other *Options) *Options {
+ opt.TrimBlocks = other.TrimBlocks
+ opt.LStripBlocks = other.LStripBlocks
+
+ return opt
+}
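+
+// Illustrative use of the types above (a sketch using only identifiers from
+// this file):
+//
+//   opts := newOptions()
+//   opts.Update(&Options{TrimBlocks: true, LStripBlocks: true})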
diff --git a/vendor/github.com/flosch/pongo2/parser.go b/vendor/github.com/flosch/pongo2/parser.go
new file mode 100644
index 00000000..19553f17
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/parser.go
@@ -0,0 +1,309 @@
+package pongo2
+
+import (
+ "fmt"
+ "strings"
+
+ "errors"
+)
+
+type INode interface {
+ Execute(*ExecutionContext, TemplateWriter) *Error
+}
+
+type IEvaluator interface {
+ INode
+ GetPositionToken() *Token
+ Evaluate(*ExecutionContext) (*Value, *Error)
+ FilterApplied(name string) bool
+}
+
+// The parser provides you a comprehensive and easy tool to
+// work with the template document and arguments provided by
+// the user for your custom tag.
+//
+// The parser works on a token list which will be provided by pongo2.
+// A token is a unit you can work with. Tokens are either of type identifier,
+// string, number, keyword, HTML or symbol.
+//
+// (See Token's documentation for more about tokens)
+type Parser struct {
+ name string
+ idx int
+ tokens []*Token
+ lastToken *Token
+
+ // if the parser parses a template document, here will be
+ // a reference to it (needed to access the template through Tags)
+ template *Template
+}
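+
+// Illustrative use inside a custom tag parser (a sketch; the tag
+// implementations further below, e.g. tags_for.go, show real usage):
+//
+//   ident := arguments.MatchType(TokenIdentifier)
+//   if ident == nil {
+//       return nil, arguments.Error("identifier expected", nil)
+//   }
+//   expr, err := arguments.ParseExpression()
+//   if err != nil {
+//       return nil, err
+//   }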
+
+// Creates a new parser to parse tokens.
+// Used inside pongo2 to parse documents and to provide an easy-to-use
+// parser for tag authors
+func newParser(name string, tokens []*Token, template *Template) *Parser {
+ p := &Parser{
+ name: name,
+ tokens: tokens,
+ template: template,
+ }
+ if len(tokens) > 0 {
+ p.lastToken = tokens[len(tokens)-1]
+ }
+ return p
+}
+
+// Consume one token. It will be gone forever.
+func (p *Parser) Consume() {
+ p.ConsumeN(1)
+}
+
+// Consume N tokens. They will be gone forever.
+func (p *Parser) ConsumeN(count int) {
+ p.idx += count
+}
+
+// Returns the current token.
+func (p *Parser) Current() *Token {
+ return p.Get(p.idx)
+}
+
+// Returns the CURRENT token if the given type matches.
+// Consumes this token on success.
+func (p *Parser) MatchType(typ TokenType) *Token {
+ if t := p.PeekType(typ); t != nil {
+ p.Consume()
+ return t
+ }
+ return nil
+}
+
+// Returns the CURRENT token if the given type AND value matches.
+// Consumes this token on success.
+func (p *Parser) Match(typ TokenType, val string) *Token {
+ if t := p.Peek(typ, val); t != nil {
+ p.Consume()
+ return t
+ }
+ return nil
+}
+
+// Returns the CURRENT token if the given type AND *one* of
+// the given values matches.
+// Consumes this token on success.
+func (p *Parser) MatchOne(typ TokenType, vals ...string) *Token {
+ for _, val := range vals {
+ if t := p.Peek(typ, val); t != nil {
+ p.Consume()
+ return t
+ }
+ }
+ return nil
+}
+
+// Returns the CURRENT token if the given type matches.
+// It DOES NOT consume the token.
+func (p *Parser) PeekType(typ TokenType) *Token {
+ return p.PeekTypeN(0, typ)
+}
+
+// Returns the CURRENT token if the given type AND value matches.
+// It DOES NOT consume the token.
+func (p *Parser) Peek(typ TokenType, val string) *Token {
+ return p.PeekN(0, typ, val)
+}
+
+// Returns the CURRENT token if the given type AND *one* of
+// the given values matches.
+// It DOES NOT consume the token.
+func (p *Parser) PeekOne(typ TokenType, vals ...string) *Token {
+ for _, v := range vals {
+ t := p.PeekN(0, typ, v)
+ if t != nil {
+ return t
+ }
+ }
+ return nil
+}
+
+// Returns the tokens[current position + shift] token if the
+// given type AND value matches for that token.
+// DOES NOT consume the token.
+func (p *Parser) PeekN(shift int, typ TokenType, val string) *Token {
+ t := p.Get(p.idx + shift)
+ if t != nil {
+ if t.Typ == typ && t.Val == val {
+ return t
+ }
+ }
+ return nil
+}
+
+// Returns the tokens[current position + shift] token if the given type matches.
+// DOES NOT consume the token for that token.
+func (p *Parser) PeekTypeN(shift int, typ TokenType) *Token {
+ t := p.Get(p.idx + shift)
+ if t != nil {
+ if t.Typ == typ {
+ return t
+ }
+ }
+ return nil
+}
+
+// Returns the UNCONSUMED token count.
+func (p *Parser) Remaining() int {
+ return len(p.tokens) - p.idx
+}
+
+// Returns the total token count.
+func (p *Parser) Count() int {
+ return len(p.tokens)
+}
+
+// Returns tokens[i] or NIL (if i >= len(tokens))
+func (p *Parser) Get(i int) *Token {
+ if i < len(p.tokens) && i >= 0 {
+ return p.tokens[i]
+ }
+ return nil
+}
+
+// Returns tokens[current-position + shift] or NIL
+// (if (current-position + i) >= len(tokens))
+func (p *Parser) GetR(shift int) *Token {
+ i := p.idx + shift
+ return p.Get(i)
+}
+
+// Error produces a nice error message and returns an error-object.
+// The 'token'-argument is optional. If provided, it will take
+// the token's position information. If not provided, it will
+// automatically use the CURRENT token's position information.
+func (p *Parser) Error(msg string, token *Token) *Error {
+ if token == nil {
+ // Set current token
+ token = p.Current()
+ if token == nil {
+ // Set to last token
+ if len(p.tokens) > 0 {
+ token = p.tokens[len(p.tokens)-1]
+ }
+ }
+ }
+ var line, col int
+ if token != nil {
+ line = token.Line
+ col = token.Col
+ }
+ return &Error{
+ Template: p.template,
+ Filename: p.name,
+ Sender: "parser",
+ Line: line,
+ Column: col,
+ Token: token,
+ OrigError: errors.New(msg),
+ }
+}
+
+// Wraps all nodes between starting tag and "{% endtag %}" and provides
+// one simple interface to execute the wrapped nodes.
+// It returns a parser to process provided arguments to the tag.
+func (p *Parser) WrapUntilTag(names ...string) (*NodeWrapper, *Parser, *Error) {
+ wrapper := &NodeWrapper{}
+
+ var tagArgs []*Token
+
+ for p.Remaining() > 0 {
+ // New tag, check whether we have to stop wrapping here
+ if p.Peek(TokenSymbol, "{%") != nil {
+ tagIdent := p.PeekTypeN(1, TokenIdentifier)
+
+ if tagIdent != nil {
+ // We've found a (!) end-tag
+
+ found := false
+ for _, n := range names {
+ if tagIdent.Val == n {
+ found = true
+ break
+ }
+ }
+
+ // We only process the tag if we've found an end tag
+ if found {
+ // Okay, endtag found.
+ p.ConsumeN(2) // '{%' tagname
+
+ for {
+ if p.Match(TokenSymbol, "%}") != nil {
+ // Okay, end the wrapping here
+ wrapper.Endtag = tagIdent.Val
+ return wrapper, newParser(p.template.name, tagArgs, p.template), nil
+ }
+ t := p.Current()
+ p.Consume()
+ if t == nil {
+ return nil, nil, p.Error("Unexpected EOF.", p.lastToken)
+ }
+ tagArgs = append(tagArgs, t)
+ }
+ }
+ }
+
+ }
+
+ // Otherwise process next element to be wrapped
+ node, err := p.parseDocElement()
+ if err != nil {
+ return nil, nil, err
+ }
+ wrapper.nodes = append(wrapper.nodes, node)
+ }
+
+ return nil, nil, p.Error(fmt.Sprintf("Unexpected EOF, expected tag %s.", strings.Join(names, " or ")),
+ p.lastToken)
+}
+
+// Skips all nodes between starting tag and "{% endtag %}"
+func (p *Parser) SkipUntilTag(names ...string) *Error {
+ for p.Remaining() > 0 {
+ // New tag, check whether we have to stop wrapping here
+ if p.Peek(TokenSymbol, "{%") != nil {
+ tagIdent := p.PeekTypeN(1, TokenIdentifier)
+
+ if tagIdent != nil {
+ // We've found a (!) end-tag
+
+ found := false
+ for _, n := range names {
+ if tagIdent.Val == n {
+ found = true
+ break
+ }
+ }
+
+ // We only process the tag if we've found an end tag
+ if found {
+ // Okay, endtag found.
+ p.ConsumeN(2) // '{%' tagname
+
+ for {
+ if p.Match(TokenSymbol, "%}") != nil {
+ // Done skipping, exit.
+ return nil
+ }
+ }
+ }
+ }
+ }
+ t := p.Current()
+ p.Consume()
+ if t == nil {
+ return p.Error("Unexpected EOF.", p.lastToken)
+ }
+ }
+
+ return p.Error(fmt.Sprintf("Unexpected EOF, expected tag %s.", strings.Join(names, " or ")), p.lastToken)
+}
diff --git a/vendor/github.com/flosch/pongo2/parser_document.go b/vendor/github.com/flosch/pongo2/parser_document.go
new file mode 100644
index 00000000..e3ac2c8e
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/parser_document.go
@@ -0,0 +1,59 @@
+package pongo2
+
+// Doc = { ( Filter | Tag | HTML ) }
+func (p *Parser) parseDocElement() (INode, *Error) {
+ t := p.Current()
+
+ switch t.Typ {
+ case TokenHTML:
+ n := &nodeHTML{token: t}
+ left := p.PeekTypeN(-1, TokenSymbol)
+ right := p.PeekTypeN(1, TokenSymbol)
+ n.trimLeft = left != nil && left.TrimWhitespaces
+ n.trimRight = right != nil && right.TrimWhitespaces
+ p.Consume() // consume HTML element
+ return n, nil
+ case TokenSymbol:
+ switch t.Val {
+ case "{{":
+ // parse variable
+ variable, err := p.parseVariableElement()
+ if err != nil {
+ return nil, err
+ }
+ return variable, nil
+ case "{%":
+ // parse tag
+ tag, err := p.parseTagElement()
+ if err != nil {
+ return nil, err
+ }
+ return tag, nil
+ }
+ }
+ return nil, p.Error("Unexpected token (only HTML/tags/filters in templates allowed)", t)
+}
+
+func (tpl *Template) parse() *Error {
+ tpl.parser = newParser(tpl.name, tpl.tokens, tpl)
+ doc, err := tpl.parser.parseDocument()
+ if err != nil {
+ return err
+ }
+ tpl.root = doc
+ return nil
+}
+
+func (p *Parser) parseDocument() (*nodeDocument, *Error) {
+ doc := &nodeDocument{}
+
+ for p.Remaining() > 0 {
+ node, err := p.parseDocElement()
+ if err != nil {
+ return nil, err
+ }
+ doc.Nodes = append(doc.Nodes, node)
+ }
+
+ return doc, nil
+}
diff --git a/vendor/github.com/flosch/pongo2/parser_expression.go b/vendor/github.com/flosch/pongo2/parser_expression.go
new file mode 100644
index 00000000..215b0afb
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/parser_expression.go
@@ -0,0 +1,517 @@
+package pongo2
+
+import (
+ "fmt"
+ "math"
+)
+
+type Expression struct {
+ // TODO: Add location token?
+ expr1 IEvaluator
+ expr2 IEvaluator
+ opToken *Token
+}
+
+type relationalExpression struct {
+ // TODO: Add location token?
+ expr1 IEvaluator
+ expr2 IEvaluator
+ opToken *Token
+}
+
+type simpleExpression struct {
+ negate bool
+ negativeSign bool
+ term1 IEvaluator
+ term2 IEvaluator
+ opToken *Token
+}
+
+type term struct {
+ // TODO: Add location token?
+ factor1 IEvaluator
+ factor2 IEvaluator
+ opToken *Token
+}
+
+type power struct {
+ // TODO: Add location token?
+ power1 IEvaluator
+ power2 IEvaluator
+}
+
+func (expr *Expression) FilterApplied(name string) bool {
+ return expr.expr1.FilterApplied(name) && (expr.expr2 == nil ||
+ (expr.expr2 != nil && expr.expr2.FilterApplied(name)))
+}
+
+func (expr *relationalExpression) FilterApplied(name string) bool {
+ return expr.expr1.FilterApplied(name) && (expr.expr2 == nil ||
+ (expr.expr2 != nil && expr.expr2.FilterApplied(name)))
+}
+
+func (expr *simpleExpression) FilterApplied(name string) bool {
+ return expr.term1.FilterApplied(name) && (expr.term2 == nil ||
+ (expr.term2 != nil && expr.term2.FilterApplied(name)))
+}
+
+func (expr *term) FilterApplied(name string) bool {
+ return expr.factor1.FilterApplied(name) && (expr.factor2 == nil ||
+ (expr.factor2 != nil && expr.factor2.FilterApplied(name)))
+}
+
+func (expr *power) FilterApplied(name string) bool {
+ return expr.power1.FilterApplied(name) && (expr.power2 == nil ||
+ (expr.power2 != nil && expr.power2.FilterApplied(name)))
+}
+
+func (expr *Expression) GetPositionToken() *Token {
+ return expr.expr1.GetPositionToken()
+}
+
+func (expr *relationalExpression) GetPositionToken() *Token {
+ return expr.expr1.GetPositionToken()
+}
+
+func (expr *simpleExpression) GetPositionToken() *Token {
+ return expr.term1.GetPositionToken()
+}
+
+func (expr *term) GetPositionToken() *Token {
+ return expr.factor1.GetPositionToken()
+}
+
+func (expr *power) GetPositionToken() *Token {
+ return expr.power1.GetPositionToken()
+}
+
+func (expr *Expression) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := expr.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (expr *relationalExpression) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := expr.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (expr *simpleExpression) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := expr.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (expr *term) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := expr.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (expr *power) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := expr.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (expr *Expression) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+ v1, err := expr.expr1.Evaluate(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if expr.expr2 != nil {
+ switch expr.opToken.Val {
+ case "and", "&&":
+ if !v1.IsTrue() {
+ return AsValue(false), nil
+ } else {
+ v2, err := expr.expr2.Evaluate(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return AsValue(v2.IsTrue()), nil
+ }
+ case "or", "||":
+ if v1.IsTrue() {
+ return AsValue(true), nil
+ } else {
+ v2, err := expr.expr2.Evaluate(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return AsValue(v2.IsTrue()), nil
+ }
+ default:
+ return nil, ctx.Error(fmt.Sprintf("unimplemented: %s", expr.opToken.Val), expr.opToken)
+ }
+ } else {
+ return v1, nil
+ }
+}
+
+func (expr *relationalExpression) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+ v1, err := expr.expr1.Evaluate(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if expr.expr2 != nil {
+ v2, err := expr.expr2.Evaluate(ctx)
+ if err != nil {
+ return nil, err
+ }
+ switch expr.opToken.Val {
+ case "<=":
+ if v1.IsFloat() || v2.IsFloat() {
+ return AsValue(v1.Float() <= v2.Float()), nil
+ }
+ if v1.IsTime() && v2.IsTime() {
+ tm1, tm2 := v1.Time(), v2.Time()
+ return AsValue(tm1.Before(tm2) || tm1.Equal(tm2)), nil
+ }
+ return AsValue(v1.Integer() <= v2.Integer()), nil
+ case ">=":
+ if v1.IsFloat() || v2.IsFloat() {
+ return AsValue(v1.Float() >= v2.Float()), nil
+ }
+ if v1.IsTime() && v2.IsTime() {
+ tm1, tm2 := v1.Time(), v2.Time()
+ return AsValue(tm1.After(tm2) || tm1.Equal(tm2)), nil
+ }
+ return AsValue(v1.Integer() >= v2.Integer()), nil
+ case "==":
+ return AsValue(v1.EqualValueTo(v2)), nil
+ case ">":
+ if v1.IsFloat() || v2.IsFloat() {
+ return AsValue(v1.Float() > v2.Float()), nil
+ }
+ if v1.IsTime() && v2.IsTime() {
+ return AsValue(v1.Time().After(v2.Time())), nil
+ }
+ return AsValue(v1.Integer() > v2.Integer()), nil
+ case "<":
+ if v1.IsFloat() || v2.IsFloat() {
+ return AsValue(v1.Float() < v2.Float()), nil
+ }
+ if v1.IsTime() && v2.IsTime() {
+ return AsValue(v1.Time().Before(v2.Time())), nil
+ }
+ return AsValue(v1.Integer() < v2.Integer()), nil
+ case "!=", "<>":
+ return AsValue(!v1.EqualValueTo(v2)), nil
+ case "in":
+ return AsValue(v2.Contains(v1)), nil
+ default:
+ return nil, ctx.Error(fmt.Sprintf("unimplemented: %s", expr.opToken.Val), expr.opToken)
+ }
+ } else {
+ return v1, nil
+ }
+}
+
+func (expr *simpleExpression) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+ t1, err := expr.term1.Evaluate(ctx)
+ if err != nil {
+ return nil, err
+ }
+ result := t1
+
+ if expr.negate {
+ result = result.Negate()
+ }
+
+ if expr.negativeSign {
+ if result.IsNumber() {
+ switch {
+ case result.IsFloat():
+ result = AsValue(-1 * result.Float())
+ case result.IsInteger():
+ result = AsValue(-1 * result.Integer())
+ default:
+ return nil, ctx.Error("Operation between a number and a non-(float/integer) is not possible", nil)
+ }
+ } else {
+ return nil, ctx.Error("Negative sign on a non-number expression", expr.GetPositionToken())
+ }
+ }
+
+ if expr.term2 != nil {
+ t2, err := expr.term2.Evaluate(ctx)
+ if err != nil {
+ return nil, err
+ }
+ switch expr.opToken.Val {
+ case "+":
+ if result.IsFloat() || t2.IsFloat() {
+ // Result will be a float
+ return AsValue(result.Float() + t2.Float()), nil
+ }
+ // Result will be an integer
+ return AsValue(result.Integer() + t2.Integer()), nil
+ case "-":
+ if result.IsFloat() || t2.IsFloat() {
+ // Result will be a float
+ return AsValue(result.Float() - t2.Float()), nil
+ }
+ // Result will be an integer
+ return AsValue(result.Integer() - t2.Integer()), nil
+ default:
+ return nil, ctx.Error("Unimplemented", expr.GetPositionToken())
+ }
+ }
+
+ return result, nil
+}
+
+func (expr *term) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+ f1, err := expr.factor1.Evaluate(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if expr.factor2 != nil {
+ f2, err := expr.factor2.Evaluate(ctx)
+ if err != nil {
+ return nil, err
+ }
+ switch expr.opToken.Val {
+ case "*":
+ if f1.IsFloat() || f2.IsFloat() {
+ // Result will be float
+ return AsValue(f1.Float() * f2.Float()), nil
+ }
+ // Result will be int
+ return AsValue(f1.Integer() * f2.Integer()), nil
+ case "/":
+ if f1.IsFloat() || f2.IsFloat() {
+ // Result will be float
+ return AsValue(f1.Float() / f2.Float()), nil
+ }
+ // Result will be int
+ return AsValue(f1.Integer() / f2.Integer()), nil
+ case "%":
+ // Result will be int
+ return AsValue(f1.Integer() % f2.Integer()), nil
+ default:
+ return nil, ctx.Error("unimplemented", expr.opToken)
+ }
+ } else {
+ return f1, nil
+ }
+}
+
+func (expr *power) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+ p1, err := expr.power1.Evaluate(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if expr.power2 != nil {
+ p2, err := expr.power2.Evaluate(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return AsValue(math.Pow(p1.Float(), p2.Float())), nil
+ }
+ return p1, nil
+}
+
+func (p *Parser) parseFactor() (IEvaluator, *Error) {
+ if p.Match(TokenSymbol, "(") != nil {
+ expr, err := p.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ if p.Match(TokenSymbol, ")") == nil {
+ return nil, p.Error("Closing bracket expected after expression", nil)
+ }
+ return expr, nil
+ }
+
+ return p.parseVariableOrLiteralWithFilter()
+}
+
+func (p *Parser) parsePower() (IEvaluator, *Error) {
+ pw := new(power)
+
+ power1, err := p.parseFactor()
+ if err != nil {
+ return nil, err
+ }
+ pw.power1 = power1
+
+ if p.Match(TokenSymbol, "^") != nil {
+ power2, err := p.parsePower()
+ if err != nil {
+ return nil, err
+ }
+ pw.power2 = power2
+ }
+
+ if pw.power2 == nil {
+ // Shortcut for faster evaluation
+ return pw.power1, nil
+ }
+
+ return pw, nil
+}
+
+func (p *Parser) parseTerm() (IEvaluator, *Error) {
+ returnTerm := new(term)
+
+ factor1, err := p.parsePower()
+ if err != nil {
+ return nil, err
+ }
+ returnTerm.factor1 = factor1
+
+ for p.PeekOne(TokenSymbol, "*", "/", "%") != nil {
+ if returnTerm.opToken != nil {
+ // Create new sub-term
+ returnTerm = &term{
+ factor1: returnTerm,
+ }
+ }
+
+ op := p.Current()
+ p.Consume()
+
+ factor2, err := p.parsePower()
+ if err != nil {
+ return nil, err
+ }
+
+ returnTerm.opToken = op
+ returnTerm.factor2 = factor2
+ }
+
+ if returnTerm.opToken == nil {
+ // Shortcut for faster evaluation
+ return returnTerm.factor1, nil
+ }
+
+ return returnTerm, nil
+}
+
+func (p *Parser) parseSimpleExpression() (IEvaluator, *Error) {
+ expr := new(simpleExpression)
+
+ if sign := p.MatchOne(TokenSymbol, "+", "-"); sign != nil {
+ if sign.Val == "-" {
+ expr.negativeSign = true
+ }
+ }
+
+ if p.Match(TokenSymbol, "!") != nil || p.Match(TokenKeyword, "not") != nil {
+ expr.negate = true
+ }
+
+ term1, err := p.parseTerm()
+ if err != nil {
+ return nil, err
+ }
+ expr.term1 = term1
+
+ for p.PeekOne(TokenSymbol, "+", "-") != nil {
+ if expr.opToken != nil {
+ // New sub expr
+ expr = &simpleExpression{
+ term1: expr,
+ }
+ }
+
+ op := p.Current()
+ p.Consume()
+
+ term2, err := p.parseTerm()
+ if err != nil {
+ return nil, err
+ }
+
+ expr.term2 = term2
+ expr.opToken = op
+ }
+
+ if !expr.negate && !expr.negativeSign && expr.term2 == nil {
+ // Shortcut for faster evaluation
+ return expr.term1, nil
+ }
+
+ return expr, nil
+}
+
+func (p *Parser) parseRelationalExpression() (IEvaluator, *Error) {
+ expr1, err := p.parseSimpleExpression()
+ if err != nil {
+ return nil, err
+ }
+
+ expr := &relationalExpression{
+ expr1: expr1,
+ }
+
+ if t := p.MatchOne(TokenSymbol, "==", "<=", ">=", "!=", "<>", ">", "<"); t != nil {
+ expr2, err := p.parseRelationalExpression()
+ if err != nil {
+ return nil, err
+ }
+ expr.opToken = t
+ expr.expr2 = expr2
+ } else if t := p.MatchOne(TokenKeyword, "in"); t != nil {
+ expr2, err := p.parseSimpleExpression()
+ if err != nil {
+ return nil, err
+ }
+ expr.opToken = t
+ expr.expr2 = expr2
+ }
+
+ if expr.expr2 == nil {
+ // Shortcut for faster evaluation
+ return expr.expr1, nil
+ }
+
+ return expr, nil
+}
+
+func (p *Parser) ParseExpression() (IEvaluator, *Error) {
+ rexpr1, err := p.parseRelationalExpression()
+ if err != nil {
+ return nil, err
+ }
+
+ exp := &Expression{
+ expr1: rexpr1,
+ }
+
+ if p.PeekOne(TokenSymbol, "&&", "||") != nil || p.PeekOne(TokenKeyword, "and", "or") != nil {
+ op := p.Current()
+ p.Consume()
+ expr2, err := p.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ exp.expr2 = expr2
+ exp.opToken = op
+ }
+
+ if exp.expr2 == nil {
+ // Shortcut for faster evaluation
+ return exp.expr1, nil
+ }
+
+ return exp, nil
+}
diff --git a/vendor/github.com/flosch/pongo2/pongo2.go b/vendor/github.com/flosch/pongo2/pongo2.go
new file mode 100644
index 00000000..eda3aa07
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/pongo2.go
@@ -0,0 +1,14 @@
+package pongo2
+
+// Version string
+const Version = "dev"
+
+// Must panics if a Template couldn't be parsed successfully. This is how you
+// would use it:
+// var baseTemplate = pongo2.Must(pongo2.FromFile("templates/base.html"))
+func Must(tpl *Template, err error) *Template {
+ if err != nil {
+ panic(err)
+ }
+ return tpl
+}
diff --git a/vendor/github.com/flosch/pongo2/tags.go b/vendor/github.com/flosch/pongo2/tags.go
new file mode 100644
index 00000000..710ee252
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags.go
@@ -0,0 +1,133 @@
+package pongo2
+
+/* Incomplete:
+ -----------
+
+ verbatim (only the "name" argument is missing for verbatim)
+
+ Reconsideration:
+ ----------------
+
+ debug (reason: not sure what to output yet)
+ regroup / Grouping on other properties (reason: maybe too python-specific; not sure how useful this would be in Go)
+
+ Following built-in tags won't be added:
+ --------------------------------------
+
+ csrf_token (reason: web-framework specific)
+ load (reason: python-specific)
+ url (reason: web-framework specific)
+*/
+
+import (
+ "fmt"
+)
+
+type INodeTag interface {
+ INode
+}
+
+// This is the function signature of the tag's parser you will have
+// to implement in order to create a new tag.
+//
+// 'doc' is providing access to the whole document while 'arguments'
+// is providing access to the user's arguments to the tag:
+//
+// {% your_tag_name some "arguments" 123 %}
+//
+// start_token will be the *Token with the tag's name in it (here: your_tag_name).
+//
+// Please see the Parser documentation on how to use the parser.
+// See RegisterTag()'s documentation for more information about
+// writing a tag as well.
+type TagParser func(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error)
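+
+// Illustrative minimal implementation of the signature above (a sketch;
+// myTagNode stands for a hypothetical INodeTag implementation and is not
+// part of this package):
+//
+//   func myTagParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+//       if arguments.Remaining() > 0 {
+//           return nil, arguments.Error("my_tag takes no arguments.", nil)
+//       }
+//       return &myTagNode{}, nil
+//   }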
+
+type tag struct {
+ name string
+ parser TagParser
+}
+
+var tags map[string]*tag
+
+func init() {
+ tags = make(map[string]*tag)
+}
+
+// Registers a new tag. You usually want to call this
+// function in the tag's init() function:
+// http://golang.org/doc/effective_go.html#init
+//
+// See http://www.florian-schlachter.de/post/pongo2/ for more about
+// writing filters and tags.
+func RegisterTag(name string, parserFn TagParser) error {
+ _, existing := tags[name]
+ if existing {
+ return fmt.Errorf("tag with name '%s' is already registered", name)
+ }
+ tags[name] = &tag{
+ name: name,
+ parser: parserFn,
+ }
+ return nil
+}
+
+// Replaces an already registered tag with a new implementation. Use this
+// function with caution since it allows you to change existing tag behaviour.
+func ReplaceTag(name string, parserFn TagParser) error {
+ _, existing := tags[name]
+ if !existing {
+ return fmt.Errorf("tag with name '%s' does not exist (therefore cannot be overridden)", name)
+ }
+ tags[name] = &tag{
+ name: name,
+ parser: parserFn,
+ }
+ return nil
+}
+
+// Tag = "{%" IDENT ARGS "%}"
+func (p *Parser) parseTagElement() (INodeTag, *Error) {
+ p.Consume() // consume "{%"
+ tokenName := p.MatchType(TokenIdentifier)
+
+ // Check for identifier
+ if tokenName == nil {
+ return nil, p.Error("Tag name must be an identifier.", nil)
+ }
+
+ // Check for the existing tag
+ tag, exists := tags[tokenName.Val]
+ if !exists {
+ // Does not exist
+ return nil, p.Error(fmt.Sprintf("Tag '%s' not found (or beginning tag not provided)", tokenName.Val), tokenName)
+ }
+
+ // Check sandbox tag restriction
+ if _, isBanned := p.template.set.bannedTags[tokenName.Val]; isBanned {
+ return nil, p.Error(fmt.Sprintf("Usage of tag '%s' is not allowed (sandbox restriction active).", tokenName.Val), tokenName)
+ }
+
+ var argsToken []*Token
+ for p.Peek(TokenSymbol, "%}") == nil && p.Remaining() > 0 {
+ // Add token to args
+ argsToken = append(argsToken, p.Current())
+ p.Consume() // next token
+ }
+
+ // EOF?
+ if p.Remaining() == 0 {
+ return nil, p.Error("Unexpectedly reached EOF, no tag end found.", p.lastToken)
+ }
+
+ p.Match(TokenSymbol, "%}")
+
+ argParser := newParser(p.name, argsToken, p.template)
+ if len(argsToken) == 0 {
+ // This is done to have nice EOF error messages
+ argParser.lastToken = tokenName
+ }
+
+ p.template.level++
+ defer func() { p.template.level-- }()
+ return tag.parser(p, tokenName, argParser)
+}
diff --git a/vendor/github.com/flosch/pongo2/tags_autoescape.go b/vendor/github.com/flosch/pongo2/tags_autoescape.go
new file mode 100644
index 00000000..590a1db3
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_autoescape.go
@@ -0,0 +1,52 @@
+package pongo2
+
+type tagAutoescapeNode struct {
+ wrapper *NodeWrapper
+ autoescape bool
+}
+
+func (node *tagAutoescapeNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ old := ctx.Autoescape
+ ctx.Autoescape = node.autoescape
+
+ err := node.wrapper.Execute(ctx, writer)
+ if err != nil {
+ return err
+ }
+
+ ctx.Autoescape = old
+
+ return nil
+}
+
+func tagAutoescapeParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ autoescapeNode := &tagAutoescapeNode{}
+
+ wrapper, _, err := doc.WrapUntilTag("endautoescape")
+ if err != nil {
+ return nil, err
+ }
+ autoescapeNode.wrapper = wrapper
+
+ modeToken := arguments.MatchType(TokenIdentifier)
+ if modeToken == nil {
+ return nil, arguments.Error("A mode is required for autoescape-tag.", nil)
+ }
+ if modeToken.Val == "on" {
+ autoescapeNode.autoescape = true
+ } else if modeToken.Val == "off" {
+ autoescapeNode.autoescape = false
+ } else {
+ return nil, arguments.Error("Only 'on' or 'off' is valid as an autoescape-mode.", nil)
+ }
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed autoescape-tag arguments.", nil)
+ }
+
+ return autoescapeNode, nil
+}
+
+func init() {
+ RegisterTag("autoescape", tagAutoescapeParser)
+}
diff --git a/vendor/github.com/flosch/pongo2/tags_block.go b/vendor/github.com/flosch/pongo2/tags_block.go
new file mode 100644
index 00000000..86145f32
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_block.go
@@ -0,0 +1,129 @@
+package pongo2
+
+import (
+ "bytes"
+ "fmt"
+)
+
+type tagBlockNode struct {
+ name string
+}
+
+func (node *tagBlockNode) getBlockWrappers(tpl *Template) []*NodeWrapper {
+ nodeWrappers := make([]*NodeWrapper, 0)
+ var t *NodeWrapper
+
+ for tpl != nil {
+ t = tpl.blocks[node.name]
+ if t != nil {
+ nodeWrappers = append(nodeWrappers, t)
+ }
+ tpl = tpl.child
+ }
+
+ return nodeWrappers
+}
+
+func (node *tagBlockNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ tpl := ctx.template
+ if tpl == nil {
+ panic("internal error: tpl == nil")
+ }
+
+ // Determine the block to execute
+ blockWrappers := node.getBlockWrappers(tpl)
+ lenBlockWrappers := len(blockWrappers)
+
+ if lenBlockWrappers == 0 {
+ return ctx.Error("internal error: len(block_wrappers) == 0 in tagBlockNode.Execute()", nil)
+ }
+
+ blockWrapper := blockWrappers[lenBlockWrappers-1]
+ ctx.Private["block"] = tagBlockInformation{
+ ctx: ctx,
+ wrappers: blockWrappers[0 : lenBlockWrappers-1],
+ }
+ err := blockWrapper.Execute(ctx, writer)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+type tagBlockInformation struct {
+ ctx *ExecutionContext
+ wrappers []*NodeWrapper
+}
+
+func (t tagBlockInformation) Super() string {
+ lenWrappers := len(t.wrappers)
+
+ if lenWrappers == 0 {
+ return ""
+ }
+
+ superCtx := NewChildExecutionContext(t.ctx)
+ superCtx.Private["block"] = tagBlockInformation{
+ ctx: t.ctx,
+ wrappers: t.wrappers[0 : lenWrappers-1],
+ }
+
+ blockWrapper := t.wrappers[lenWrappers-1]
+ buf := bytes.NewBufferString("")
+ err := blockWrapper.Execute(superCtx, &templateWriter{buf})
+ if err != nil {
+ return ""
+ }
+ return buf.String()
+}
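+
+// Illustrative template usage (assuming pongo2's exported-method lookup on
+// the "block" value placed into the context by Execute above):
+//
+//   {% block content %}{{ block.Super }} additional content{% endblock %}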
+
+func tagBlockParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ if arguments.Count() == 0 {
+ return nil, arguments.Error("Tag 'block' requires an identifier.", nil)
+ }
+
+ nameToken := arguments.MatchType(TokenIdentifier)
+ if nameToken == nil {
+ return nil, arguments.Error("First argument for tag 'block' must be an identifier.", nil)
+ }
+
+ if arguments.Remaining() != 0 {
+ return nil, arguments.Error("Tag 'block' takes exactly 1 argument (an identifier).", nil)
+ }
+
+ wrapper, endtagargs, err := doc.WrapUntilTag("endblock")
+ if err != nil {
+ return nil, err
+ }
+ if endtagargs.Remaining() > 0 {
+ endtagnameToken := endtagargs.MatchType(TokenIdentifier)
+ if endtagnameToken != nil {
+ if endtagnameToken.Val != nameToken.Val {
+ return nil, endtagargs.Error(fmt.Sprintf("Name for 'endblock' must equal to 'block'-tag's name ('%s' != '%s').",
+ nameToken.Val, endtagnameToken.Val), nil)
+ }
+ }
+
+ if endtagnameToken == nil || endtagargs.Remaining() > 0 {
+ return nil, endtagargs.Error("Either no or only one argument (identifier) allowed for 'endblock'.", nil)
+ }
+ }
+
+ tpl := doc.template
+ if tpl == nil {
+ panic("internal error: tpl == nil")
+ }
+ _, hasBlock := tpl.blocks[nameToken.Val]
+ if !hasBlock {
+ tpl.blocks[nameToken.Val] = wrapper
+ } else {
+ return nil, arguments.Error(fmt.Sprintf("Block named '%s' already defined", nameToken.Val), nil)
+ }
+
+ return &tagBlockNode{name: nameToken.Val}, nil
+}
+
+func init() {
+ RegisterTag("block", tagBlockParser)
+}
diff --git a/vendor/github.com/flosch/pongo2/tags_comment.go b/vendor/github.com/flosch/pongo2/tags_comment.go
new file mode 100644
index 00000000..56a02ed9
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_comment.go
@@ -0,0 +1,27 @@
+package pongo2
+
+type tagCommentNode struct{}
+
+func (node *tagCommentNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ return nil
+}
+
+func tagCommentParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ commentNode := &tagCommentNode{}
+
+ // TODO: Process the endtag's arguments (see django 'comment'-tag documentation)
+ err := doc.SkipUntilTag("endcomment")
+ if err != nil {
+ return nil, err
+ }
+
+ if arguments.Count() != 0 {
+ return nil, arguments.Error("Tag 'comment' does not take any argument.", nil)
+ }
+
+ return commentNode, nil
+}
+
+func init() {
+ RegisterTag("comment", tagCommentParser)
+}
diff --git a/vendor/github.com/flosch/pongo2/tags_cycle.go b/vendor/github.com/flosch/pongo2/tags_cycle.go
new file mode 100644
index 00000000..ffbd254e
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_cycle.go
@@ -0,0 +1,106 @@
+package pongo2
+
+type tagCycleValue struct {
+ node *tagCycleNode
+ value *Value
+}
+
+type tagCycleNode struct {
+ position *Token
+ args []IEvaluator
+ idx int
+ asName string
+ silent bool
+}
+
+func (cv *tagCycleValue) String() string {
+ return cv.value.String()
+}
+
+func (node *tagCycleNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ item := node.args[node.idx%len(node.args)]
+ node.idx++
+
+ val, err := item.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+
+ if t, ok := val.Interface().(*tagCycleValue); ok {
+ // {% cycle "test1" "test2"
+ // {% cycle cycleitem %}
+
+ // Update the cycle value with next value
+ item := t.node.args[t.node.idx%len(t.node.args)]
+ t.node.idx++
+
+ val, err := item.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+
+ t.value = val
+
+ if !t.node.silent {
+ writer.WriteString(val.String())
+ }
+ } else {
+ // Regular call
+
+ cycleValue := &tagCycleValue{
+ node: node,
+ value: val,
+ }
+
+ if node.asName != "" {
+ ctx.Private[node.asName] = cycleValue
+ }
+ if !node.silent {
+ writer.WriteString(val.String())
+ }
+ }
+
+ return nil
+}
+
+// HINT: We're not supporting the old comma-separated list of expressions argument-style
+func tagCycleParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ cycleNode := &tagCycleNode{
+ position: start,
+ }
+
+ for arguments.Remaining() > 0 {
+ node, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ cycleNode.args = append(cycleNode.args, node)
+
+ if arguments.MatchOne(TokenKeyword, "as") != nil {
+ // as
+
+ nameToken := arguments.MatchType(TokenIdentifier)
+ if nameToken == nil {
+ return nil, arguments.Error("Name (identifier) expected after 'as'.", nil)
+ }
+ cycleNode.asName = nameToken.Val
+
+ if arguments.MatchOne(TokenIdentifier, "silent") != nil {
+ cycleNode.silent = true
+ }
+
+ // Now we're finished
+ break
+ }
+ }
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed cycle-tag.", nil)
+ }
+
+ return cycleNode, nil
+}
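+
+// Illustrative template usage (space-separated arguments, as parsed above):
+//
+//   {% cycle "odd" "even" %}
+//   {% cycle "row1" "row2" as rowcolors silent %}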
+
+func init() {
+ RegisterTag("cycle", tagCycleParser)
+}
diff --git a/vendor/github.com/flosch/pongo2/tags_extends.go b/vendor/github.com/flosch/pongo2/tags_extends.go
new file mode 100644
index 00000000..5771020a
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_extends.go
@@ -0,0 +1,52 @@
+package pongo2
+
+type tagExtendsNode struct {
+ filename string
+}
+
+func (node *tagExtendsNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ return nil
+}
+
+func tagExtendsParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ extendsNode := &tagExtendsNode{}
+
+ if doc.template.level > 1 {
+ return nil, arguments.Error("The 'extends' tag can only defined on root level.", start)
+ }
+
+ if doc.template.parent != nil {
+ // Already one parent
+ return nil, arguments.Error("This template has already one parent.", start)
+ }
+
+ if filenameToken := arguments.MatchType(TokenString); filenameToken != nil {
+ // prepared, static template
+
+ // Get parent's filename
+ parentFilename := doc.template.set.resolveFilename(doc.template, filenameToken.Val)
+
+ // Parse the parent
+ parentTemplate, err := doc.template.set.FromFile(parentFilename)
+ if err != nil {
+ return nil, err.(*Error)
+ }
+
+ // Keep track of things
+ parentTemplate.child = doc.template
+ doc.template.parent = parentTemplate
+ extendsNode.filename = parentFilename
+ } else {
+ return nil, arguments.Error("Tag 'extends' requires a template filename as string.", nil)
+ }
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Tag 'extends' does only take 1 argument.", nil)
+ }
+
+ return extendsNode, nil
+}
+
+func init() {
+ RegisterTag("extends", tagExtendsParser)
+}
diff --git a/vendor/github.com/flosch/pongo2/tags_filter.go b/vendor/github.com/flosch/pongo2/tags_filter.go
new file mode 100644
index 00000000..b38fd929
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_filter.go
@@ -0,0 +1,95 @@
+package pongo2
+
+import (
+ "bytes"
+)
+
+type nodeFilterCall struct {
+ name string
+ paramExpr IEvaluator
+}
+
+type tagFilterNode struct {
+ position *Token
+ bodyWrapper *NodeWrapper
+ filterChain []*nodeFilterCall
+}
+
+func (node *tagFilterNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ temp := bytes.NewBuffer(make([]byte, 0, 1024)) // 1 KiB size
+
+ err := node.bodyWrapper.Execute(ctx, temp)
+ if err != nil {
+ return err
+ }
+
+ value := AsValue(temp.String())
+
+ for _, call := range node.filterChain {
+ var param *Value
+ if call.paramExpr != nil {
+ param, err = call.paramExpr.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ } else {
+ param = AsValue(nil)
+ }
+ value, err = ApplyFilter(call.name, value, param)
+ if err != nil {
+ return ctx.Error(err.Error(), node.position)
+ }
+ }
+
+ writer.WriteString(value.String())
+
+ return nil
+}
+
+func tagFilterParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ filterNode := &tagFilterNode{
+ position: start,
+ }
+
+ wrapper, _, err := doc.WrapUntilTag("endfilter")
+ if err != nil {
+ return nil, err
+ }
+ filterNode.bodyWrapper = wrapper
+
+ for arguments.Remaining() > 0 {
+ filterCall := &nodeFilterCall{}
+
+ nameToken := arguments.MatchType(TokenIdentifier)
+ if nameToken == nil {
+ return nil, arguments.Error("Expected a filter name (identifier).", nil)
+ }
+ filterCall.name = nameToken.Val
+
+ if arguments.MatchOne(TokenSymbol, ":") != nil {
+ // Filter parameter
+ // NOTICE: we can't use ParseExpression() here, because it would parse the next filter "|..." as well in the argument list
+ expr, err := arguments.parseVariableOrLiteral()
+ if err != nil {
+ return nil, err
+ }
+ filterCall.paramExpr = expr
+ }
+
+ filterNode.filterChain = append(filterNode.filterChain, filterCall)
+
+ if arguments.MatchOne(TokenSymbol, "|") == nil {
+ break
+ }
+ }
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed filter-tag arguments.", nil)
+ }
+
+ return filterNode, nil
+}
+
+func init() {
+ RegisterTag("filter", tagFilterParser)
+}
diff --git a/vendor/github.com/flosch/pongo2/tags_firstof.go b/vendor/github.com/flosch/pongo2/tags_firstof.go
new file mode 100644
index 00000000..5b2888e2
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_firstof.go
@@ -0,0 +1,49 @@
+package pongo2
+
+type tagFirstofNode struct {
+ position *Token
+ args []IEvaluator
+}
+
+func (node *tagFirstofNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ for _, arg := range node.args {
+ val, err := arg.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+
+ if val.IsTrue() {
+ if ctx.Autoescape && !arg.FilterApplied("safe") {
+ val, err = ApplyFilter("escape", val, nil)
+ if err != nil {
+ return err
+ }
+ }
+
+ writer.WriteString(val.String())
+ return nil
+ }
+ }
+
+ return nil
+}
+
+func tagFirstofParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ firstofNode := &tagFirstofNode{
+ position: start,
+ }
+
+ for arguments.Remaining() > 0 {
+ node, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ firstofNode.args = append(firstofNode.args, node)
+ }
+
+ return firstofNode, nil
+}
+
+func init() {
+ RegisterTag("firstof", tagFirstofParser)
+}
diff --git a/vendor/github.com/flosch/pongo2/tags_for.go b/vendor/github.com/flosch/pongo2/tags_for.go
new file mode 100644
index 00000000..5b0b5554
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_for.go
@@ -0,0 +1,159 @@
+package pongo2
+
+type tagForNode struct {
+ key string
+ value string // only for maps: for key, value in map
+ objectEvaluator IEvaluator
+ reversed bool
+ sorted bool
+
+ bodyWrapper *NodeWrapper
+ emptyWrapper *NodeWrapper
+}
+
+type tagForLoopInformation struct {
+ Counter int
+ Counter0 int
+ Revcounter int
+ Revcounter0 int
+ First bool
+ Last bool
+ Parentloop *tagForLoopInformation
+}
+
+func (node *tagForNode) Execute(ctx *ExecutionContext, writer TemplateWriter) (forError *Error) {
+ // Backup forloop (as parentloop in public context), key-name and value-name
+ forCtx := NewChildExecutionContext(ctx)
+ parentloop := forCtx.Private["forloop"]
+
+ // Create loop struct
+ loopInfo := &tagForLoopInformation{
+ First: true,
+ }
+
+ // Is it a loop in a loop?
+ if parentloop != nil {
+ loopInfo.Parentloop = parentloop.(*tagForLoopInformation)
+ }
+
+ // Register loopInfo in public context
+ forCtx.Private["forloop"] = loopInfo
+
+ obj, err := node.objectEvaluator.Evaluate(forCtx)
+ if err != nil {
+ return err
+ }
+
+ obj.IterateOrder(func(idx, count int, key, value *Value) bool {
+ // There's something to iterate over (correct type and at least 1 item)
+
+ // Update loop infos and public context
+ forCtx.Private[node.key] = key
+ if value != nil {
+ forCtx.Private[node.value] = value
+ }
+ loopInfo.Counter = idx + 1
+ loopInfo.Counter0 = idx
+ if idx == 1 {
+ loopInfo.First = false
+ }
+ if idx+1 == count {
+ loopInfo.Last = true
+ }
+ loopInfo.Revcounter = count - idx // TODO: Not sure about this, have to look it up
+ loopInfo.Revcounter0 = count - (idx + 1) // TODO: Not sure about this, have to look it up
+
+ // Render elements with updated context
+ err := node.bodyWrapper.Execute(forCtx, writer)
+ if err != nil {
+ forError = err
+ return false
+ }
+ return true
+ }, func() {
+ // Nothing to iterate over (maybe wrong type or no items)
+ if node.emptyWrapper != nil {
+ err := node.emptyWrapper.Execute(forCtx, writer)
+ if err != nil {
+ forError = err
+ }
+ }
+ }, node.reversed, node.sorted)
+
+ return forError
+}
+
+func tagForParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ forNode := &tagForNode{}
+
+ // Arguments parsing
+ var valueToken *Token
+ keyToken := arguments.MatchType(TokenIdentifier)
+ if keyToken == nil {
+ return nil, arguments.Error("Expected a key identifier as first argument for 'for'-tag", nil)
+ }
+
+ if arguments.Match(TokenSymbol, ",") != nil {
+ // Value name is provided
+ valueToken = arguments.MatchType(TokenIdentifier)
+ if valueToken == nil {
+ return nil, arguments.Error("Value name must be an identifier.", nil)
+ }
+ }
+
+ if arguments.Match(TokenKeyword, "in") == nil {
+ return nil, arguments.Error("Expected keyword 'in'.", nil)
+ }
+
+ objectEvaluator, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ forNode.objectEvaluator = objectEvaluator
+ forNode.key = keyToken.Val
+ if valueToken != nil {
+ forNode.value = valueToken.Val
+ }
+
+ if arguments.MatchOne(TokenIdentifier, "reversed") != nil {
+ forNode.reversed = true
+ }
+
+ if arguments.MatchOne(TokenIdentifier, "sorted") != nil {
+ forNode.sorted = true
+ }
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed for-loop arguments.", nil)
+ }
+
+ // Body wrapping
+ wrapper, endargs, err := doc.WrapUntilTag("empty", "endfor")
+ if err != nil {
+ return nil, err
+ }
+ forNode.bodyWrapper = wrapper
+
+ if endargs.Count() > 0 {
+ return nil, endargs.Error("Arguments not allowed here.", nil)
+ }
+
+ if wrapper.Endtag == "empty" {
+ // if there's an 'empty' block in the for-loop, we need that block as well
+ wrapper, endargs, err = doc.WrapUntilTag("endfor")
+ if err != nil {
+ return nil, err
+ }
+ forNode.emptyWrapper = wrapper
+
+ if endargs.Count() > 0 {
+ return nil, endargs.Error("Arguments not allowed here.", nil)
+ }
+ }
+
+ return forNode, nil
+}
+
+func init() {
+ RegisterTag("for", tagForParser)
+}
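A minimal, hypothetical sketch of the for-tag in use, relying only on APIs vendored elsewhere in this patch (NewSet, MustNewLocalFileSystemLoader, FromString, Execute); the template string and the "items" context key are invented for illustration:

package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	// An in-memory set; the empty base dir leaves the loader unrestricted.
	set := pongo2.NewSet("for-example", pongo2.MustNewLocalFileSystemLoader(""))

	// forloop.Counter is 1-based, forloop.Counter0 is 0-based (see tagForLoopInformation).
	tpl, err := set.FromString("{% for item in items %}{{ forloop.Counter }}: {{ item }}{% if not forloop.Last %}, {% endif %}{% endfor %}")
	if err != nil {
		panic(err)
	}

	out, err := tpl.Execute(pongo2.Context{"items": []string{"a", "b", "c"}})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // 1: a, 2: b, 3: c
}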
diff --git a/vendor/github.com/flosch/pongo2/tags_if.go b/vendor/github.com/flosch/pongo2/tags_if.go
new file mode 100644
index 00000000..3eeaf3b4
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_if.go
@@ -0,0 +1,76 @@
+package pongo2
+
+type tagIfNode struct {
+ conditions []IEvaluator
+ wrappers []*NodeWrapper
+}
+
+func (node *tagIfNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ for i, condition := range node.conditions {
+ result, err := condition.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+
+ if result.IsTrue() {
+ return node.wrappers[i].Execute(ctx, writer)
+ }
+ // Last condition?
+ if len(node.conditions) == i+1 && len(node.wrappers) > i+1 {
+ return node.wrappers[i+1].Execute(ctx, writer)
+ }
+ }
+ return nil
+}
+
+func tagIfParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ ifNode := &tagIfNode{}
+
+ // Parse first and main IF condition
+ condition, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ ifNode.conditions = append(ifNode.conditions, condition)
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("If-condition is malformed.", nil)
+ }
+
+ // Check the rest
+ for {
+ wrapper, tagArgs, err := doc.WrapUntilTag("elif", "else", "endif")
+ if err != nil {
+ return nil, err
+ }
+ ifNode.wrappers = append(ifNode.wrappers, wrapper)
+
+ if wrapper.Endtag == "elif" {
+ // elif can take a condition
+ condition, err = tagArgs.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ ifNode.conditions = append(ifNode.conditions, condition)
+
+ if tagArgs.Remaining() > 0 {
+ return nil, tagArgs.Error("Elif-condition is malformed.", nil)
+ }
+ } else {
+ if tagArgs.Count() > 0 {
+ // else/endif can't take any conditions
+ return nil, tagArgs.Error("Arguments not allowed here.", nil)
+ }
+ }
+
+ if wrapper.Endtag == "endif" {
+ break
+ }
+ }
+
+ return ifNode, nil
+}
+
+func init() {
+ RegisterTag("if", tagIfParser)
+}
diff --git a/vendor/github.com/flosch/pongo2/tags_ifchanged.go b/vendor/github.com/flosch/pongo2/tags_ifchanged.go
new file mode 100644
index 00000000..45296a0a
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_ifchanged.go
@@ -0,0 +1,116 @@
+package pongo2
+
+import (
+ "bytes"
+)
+
+type tagIfchangedNode struct {
+ watchedExpr []IEvaluator
+ lastValues []*Value
+ lastContent []byte
+ thenWrapper *NodeWrapper
+ elseWrapper *NodeWrapper
+}
+
+func (node *tagIfchangedNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ if len(node.watchedExpr) == 0 {
+ // Check against own rendered body
+
+ buf := bytes.NewBuffer(make([]byte, 0, 1024)) // 1 KiB
+ err := node.thenWrapper.Execute(ctx, buf)
+ if err != nil {
+ return err
+ }
+
+ bufBytes := buf.Bytes()
+ if !bytes.Equal(node.lastContent, bufBytes) {
+ // Rendered content changed, output it
+ writer.Write(bufBytes)
+ node.lastContent = bufBytes
+ }
+ } else {
+ nowValues := make([]*Value, 0, len(node.watchedExpr))
+ for _, expr := range node.watchedExpr {
+ val, err := expr.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ nowValues = append(nowValues, val)
+ }
+
+ // Compare old to new values now
+ changed := len(node.lastValues) == 0
+
+ for idx, oldVal := range node.lastValues {
+ if !oldVal.EqualValueTo(nowValues[idx]) {
+ changed = true
+ break // we can stop here because ONE value changed
+ }
+ }
+
+ node.lastValues = nowValues
+
+ if changed {
+ // Render thenWrapper
+ err := node.thenWrapper.Execute(ctx, writer)
+ if err != nil {
+ return err
+ }
+ } else {
+ // Render elseWrapper
+ err := node.elseWrapper.Execute(ctx, writer)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func tagIfchangedParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ ifchangedNode := &tagIfchangedNode{}
+
+ for arguments.Remaining() > 0 {
+ // Parse condition
+ expr, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ ifchangedNode.watchedExpr = append(ifchangedNode.watchedExpr, expr)
+ }
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Ifchanged-arguments are malformed.", nil)
+ }
+
+ // Wrap then/else-blocks
+ wrapper, endargs, err := doc.WrapUntilTag("else", "endifchanged")
+ if err != nil {
+ return nil, err
+ }
+ ifchangedNode.thenWrapper = wrapper
+
+ if endargs.Count() > 0 {
+ return nil, endargs.Error("Arguments not allowed here.", nil)
+ }
+
+ if wrapper.Endtag == "else" {
+ // if there's an else in the if-statement, we need the else-Block as well
+ wrapper, endargs, err = doc.WrapUntilTag("endifchanged")
+ if err != nil {
+ return nil, err
+ }
+ ifchangedNode.elseWrapper = wrapper
+
+ if endargs.Count() > 0 {
+ return nil, endargs.Error("Arguments not allowed here.", nil)
+ }
+ }
+
+ return ifchangedNode, nil
+}
+
+func init() {
+ RegisterTag("ifchanged", tagIfchangedParser)
+}
diff --git a/vendor/github.com/flosch/pongo2/tags_ifequal.go b/vendor/github.com/flosch/pongo2/tags_ifequal.go
new file mode 100644
index 00000000..103f1c7b
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_ifequal.go
@@ -0,0 +1,78 @@
+package pongo2
+
+type tagIfEqualNode struct {
+ var1, var2 IEvaluator
+ thenWrapper *NodeWrapper
+ elseWrapper *NodeWrapper
+}
+
+func (node *tagIfEqualNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ r1, err := node.var1.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ r2, err := node.var2.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+
+ result := r1.EqualValueTo(r2)
+
+ if result {
+ return node.thenWrapper.Execute(ctx, writer)
+ }
+ if node.elseWrapper != nil {
+ return node.elseWrapper.Execute(ctx, writer)
+ }
+ return nil
+}
+
+func tagIfEqualParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ ifequalNode := &tagIfEqualNode{}
+
+ // Parse two expressions
+ var1, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ var2, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ ifequalNode.var1 = var1
+ ifequalNode.var2 = var2
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("ifequal only takes 2 arguments.", nil)
+ }
+
+ // Wrap then/else-blocks
+ wrapper, endargs, err := doc.WrapUntilTag("else", "endifequal")
+ if err != nil {
+ return nil, err
+ }
+ ifequalNode.thenWrapper = wrapper
+
+ if endargs.Count() > 0 {
+ return nil, endargs.Error("Arguments not allowed here.", nil)
+ }
+
+ if wrapper.Endtag == "else" {
+ // if there's an else in the if-statement, we need the else-Block as well
+ wrapper, endargs, err = doc.WrapUntilTag("endifequal")
+ if err != nil {
+ return nil, err
+ }
+ ifequalNode.elseWrapper = wrapper
+
+ if endargs.Count() > 0 {
+ return nil, endargs.Error("Arguments not allowed here.", nil)
+ }
+ }
+
+ return ifequalNode, nil
+}
+
+func init() {
+ RegisterTag("ifequal", tagIfEqualParser)
+}
diff --git a/vendor/github.com/flosch/pongo2/tags_ifnotequal.go b/vendor/github.com/flosch/pongo2/tags_ifnotequal.go
new file mode 100644
index 00000000..0d287d34
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_ifnotequal.go
@@ -0,0 +1,78 @@
+package pongo2
+
+type tagIfNotEqualNode struct {
+ var1, var2 IEvaluator
+ thenWrapper *NodeWrapper
+ elseWrapper *NodeWrapper
+}
+
+func (node *tagIfNotEqualNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ r1, err := node.var1.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ r2, err := node.var2.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+
+ result := !r1.EqualValueTo(r2)
+
+ if result {
+ return node.thenWrapper.Execute(ctx, writer)
+ }
+ if node.elseWrapper != nil {
+ return node.elseWrapper.Execute(ctx, writer)
+ }
+ return nil
+}
+
+func tagIfNotEqualParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ ifnotequalNode := &tagIfNotEqualNode{}
+
+ // Parse two expressions
+ var1, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ var2, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ ifnotequalNode.var1 = var1
+ ifnotequalNode.var2 = var2
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("ifnotequal only takes 2 arguments.", nil)
+ }
+
+ // Wrap then/else-blocks
+ wrapper, endargs, err := doc.WrapUntilTag("else", "endifnotequal")
+ if err != nil {
+ return nil, err
+ }
+ ifnotequalNode.thenWrapper = wrapper
+
+ if endargs.Count() > 0 {
+ return nil, endargs.Error("Arguments not allowed here.", nil)
+ }
+
+ if wrapper.Endtag == "else" {
+ // if there's an else in the if-statement, we need the else-Block as well
+ wrapper, endargs, err = doc.WrapUntilTag("endifnotequal")
+ if err != nil {
+ return nil, err
+ }
+ ifnotequalNode.elseWrapper = wrapper
+
+ if endargs.Count() > 0 {
+ return nil, endargs.Error("Arguments not allowed here.", nil)
+ }
+ }
+
+ return ifnotequalNode, nil
+}
+
+func init() {
+ RegisterTag("ifnotequal", tagIfNotEqualParser)
+}
diff --git a/vendor/github.com/flosch/pongo2/tags_import.go b/vendor/github.com/flosch/pongo2/tags_import.go
new file mode 100644
index 00000000..7e0d6a01
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_import.go
@@ -0,0 +1,84 @@
+package pongo2
+
+import (
+ "fmt"
+)
+
+type tagImportNode struct {
+ position *Token
+ filename string
+ macros map[string]*tagMacroNode // alias/name -> macro instance
+}
+
+func (node *tagImportNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ for name, macro := range node.macros {
+ func(name string, macro *tagMacroNode) {
+ ctx.Private[name] = func(args ...*Value) *Value {
+ return macro.call(ctx, args...)
+ }
+ }(name, macro)
+ }
+ return nil
+}
+
+func tagImportParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ importNode := &tagImportNode{
+ position: start,
+ macros: make(map[string]*tagMacroNode),
+ }
+
+ filenameToken := arguments.MatchType(TokenString)
+ if filenameToken == nil {
+ return nil, arguments.Error("Import-tag needs a filename as string.", nil)
+ }
+
+ importNode.filename = doc.template.set.resolveFilename(doc.template, filenameToken.Val)
+
+ if arguments.Remaining() == 0 {
+ return nil, arguments.Error("You must at least specify one macro to import.", nil)
+ }
+
+ // Compile the given template
+ tpl, err := doc.template.set.FromFile(importNode.filename)
+ if err != nil {
+ return nil, err.(*Error).updateFromTokenIfNeeded(doc.template, start)
+ }
+
+ for arguments.Remaining() > 0 {
+ macroNameToken := arguments.MatchType(TokenIdentifier)
+ if macroNameToken == nil {
+ return nil, arguments.Error("Expected macro name (identifier).", nil)
+ }
+
+ asName := macroNameToken.Val
+ if arguments.Match(TokenKeyword, "as") != nil {
+ aliasToken := arguments.MatchType(TokenIdentifier)
+ if aliasToken == nil {
+ return nil, arguments.Error("Expected macro alias name (identifier).", nil)
+ }
+ asName = aliasToken.Val
+ }
+
+ macroInstance, has := tpl.exportedMacros[macroNameToken.Val]
+ if !has {
+ return nil, arguments.Error(fmt.Sprintf("Macro '%s' not found (or not exported) in '%s'.", macroNameToken.Val,
+ importNode.filename), macroNameToken)
+ }
+
+ importNode.macros[asName] = macroInstance
+
+ if arguments.Remaining() == 0 {
+ break
+ }
+
+ if arguments.Match(TokenSymbol, ",") == nil {
+ return nil, arguments.Error("Expected ','.", nil)
+ }
+ }
+
+ return importNode, nil
+}
+
+func init() {
+ RegisterTag("import", tagImportParser)
+}
diff --git a/vendor/github.com/flosch/pongo2/tags_include.go b/vendor/github.com/flosch/pongo2/tags_include.go
new file mode 100644
index 00000000..6d619fda
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_include.go
@@ -0,0 +1,146 @@
+package pongo2
+
+type tagIncludeNode struct {
+ tpl *Template
+ filenameEvaluator IEvaluator
+ lazy bool
+ only bool
+ filename string
+ withPairs map[string]IEvaluator
+ ifExists bool
+}
+
+func (node *tagIncludeNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ // Building the context for the template
+ includeCtx := make(Context)
+
+ // Fill the context with all data from the parent
+ if !node.only {
+ includeCtx.Update(ctx.Public)
+ includeCtx.Update(ctx.Private)
+ }
+
+ // Put all custom with-pairs into the context
+ for key, value := range node.withPairs {
+ val, err := value.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ includeCtx[key] = val
+ }
+
+ // Execute the template
+ if node.lazy {
+ // Evaluate the filename
+ filename, err := node.filenameEvaluator.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+
+ if filename.String() == "" {
+ return ctx.Error("Filename for 'include'-tag evaluated to an empty string.", nil)
+ }
+
+ // Get include-filename
+ includedFilename := ctx.template.set.resolveFilename(ctx.template, filename.String())
+
+ includedTpl, err2 := ctx.template.set.FromFile(includedFilename)
+ if err2 != nil {
+ // if this is a ReadFile error and the "if_exists" flag is enabled
+ if node.ifExists && err2.(*Error).Sender == "fromfile" {
+ return nil
+ }
+ return err2.(*Error)
+ }
+ err2 = includedTpl.ExecuteWriter(includeCtx, writer)
+ if err2 != nil {
+ return err2.(*Error)
+ }
+ return nil
+ }
+ // Template is already parsed with static filename
+ err := node.tpl.ExecuteWriter(includeCtx, writer)
+ if err != nil {
+ return err.(*Error)
+ }
+ return nil
+}
+
+type tagIncludeEmptyNode struct{}
+
+func (node *tagIncludeEmptyNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ return nil
+}
+
+func tagIncludeParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ includeNode := &tagIncludeNode{
+ withPairs: make(map[string]IEvaluator),
+ }
+
+ if filenameToken := arguments.MatchType(TokenString); filenameToken != nil {
+ // prepared, static template
+
+ // "if_exists" flag
+ ifExists := arguments.Match(TokenIdentifier, "if_exists") != nil
+
+ // Get include-filename
+ includedFilename := doc.template.set.resolveFilename(doc.template, filenameToken.Val)
+
+ // Parse the parent
+ includeNode.filename = includedFilename
+ includedTpl, err := doc.template.set.FromFile(includedFilename)
+ if err != nil {
+ // if this is a ReadFile error and the "if_exists" token is present, we should create an empty node
+ if err.(*Error).Sender == "fromfile" && ifExists {
+ return &tagIncludeEmptyNode{}, nil
+ }
+ return nil, err.(*Error).updateFromTokenIfNeeded(doc.template, filenameToken)
+ }
+ includeNode.tpl = includedTpl
+ } else {
+ // Not a string, so the user wants lazy evaluation (slower, but possible)
+ filenameEvaluator, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err.updateFromTokenIfNeeded(doc.template, filenameToken)
+ }
+ includeNode.filenameEvaluator = filenameEvaluator
+ includeNode.lazy = true
+ includeNode.ifExists = arguments.Match(TokenIdentifier, "if_exists") != nil // "if_exists" flag
+ }
+
+ // After having parsed the filename we're gonna parse the with+only options
+ if arguments.Match(TokenIdentifier, "with") != nil {
+ for arguments.Remaining() > 0 {
+ // We have at least one key=expr pair (because of starting "with")
+ keyToken := arguments.MatchType(TokenIdentifier)
+ if keyToken == nil {
+ return nil, arguments.Error("Expected an identifier", nil)
+ }
+ if arguments.Match(TokenSymbol, "=") == nil {
+ return nil, arguments.Error("Expected '='.", nil)
+ }
+ valueExpr, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err.updateFromTokenIfNeeded(doc.template, keyToken)
+ }
+
+ includeNode.withPairs[keyToken.Val] = valueExpr
+
+ // Only?
+ if arguments.Match(TokenIdentifier, "only") != nil {
+ includeNode.only = true
+ break // stop parsing arguments because it's the last option
+ }
+ }
+ }
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed 'include'-tag arguments.", nil)
+ }
+
+ return includeNode, nil
+}
+
+func init() {
+ RegisterTag("include", tagIncludeParser)
+}
diff --git a/vendor/github.com/flosch/pongo2/tags_lorem.go b/vendor/github.com/flosch/pongo2/tags_lorem.go
new file mode 100644
index 00000000..7794f6c1
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_lorem.go
@@ -0,0 +1,132 @@
+package pongo2
+
+import (
+ "fmt"
+ "math/rand"
+ "strings"
+ "time"
+)
+
+var (
+ tagLoremParagraphs = strings.Split(tagLoremText, "\n")
+ tagLoremWords = strings.Fields(tagLoremText)
+)
+
+type tagLoremNode struct {
+ position *Token
+ count int // number of paragraphs
+ method string // w = words, p = HTML paragraphs, b = plain-text (default is b)
+ random bool // does not use the default paragraph "Lorem ipsum dolor sit amet, ..."
+}
+
+func (node *tagLoremNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ switch node.method {
+ case "b":
+ if node.random {
+ for i := 0; i < node.count; i++ {
+ if i > 0 {
+ writer.WriteString("\n")
+ }
+ par := tagLoremParagraphs[rand.Intn(len(tagLoremParagraphs))]
+ writer.WriteString(par)
+ }
+ } else {
+ for i := 0; i < node.count; i++ {
+ if i > 0 {
+ writer.WriteString("\n")
+ }
+ par := tagLoremParagraphs[i%len(tagLoremParagraphs)]
+ writer.WriteString(par)
+ }
+ }
+ case "w":
+ if node.random {
+ for i := 0; i < node.count; i++ {
+ if i > 0 {
+ writer.WriteString(" ")
+ }
+ word := tagLoremWords[rand.Intn(len(tagLoremWords))]
+ writer.WriteString(word)
+ }
+ } else {
+ for i := 0; i < node.count; i++ {
+ if i > 0 {
+ writer.WriteString(" ")
+ }
+ word := tagLoremWords[i%len(tagLoremWords)]
+ writer.WriteString(word)
+ }
+ }
+ case "p":
+ if node.random {
+ for i := 0; i < node.count; i++ {
+ if i > 0 {
+ writer.WriteString("\n")
+ }
+ writer.WriteString("<p>")
+ par := tagLoremParagraphs[rand.Intn(len(tagLoremParagraphs))]
+ writer.WriteString(par)
+ writer.WriteString("</p>")
+ }
+ } else {
+ for i := 0; i < node.count; i++ {
+ if i > 0 {
+ writer.WriteString("\n")
+ }
+ writer.WriteString("<p>")
+ par := tagLoremParagraphs[i%len(tagLoremParagraphs)]
+ writer.WriteString(par)
+ writer.WriteString("</p>")
+
+ }
+ }
+ default:
+ return ctx.OrigError(fmt.Errorf("unsupported method: %s", node.method), nil)
+ }
+
+ return nil
+}
+
+func tagLoremParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ loremNode := &tagLoremNode{
+ position: start,
+ count: 1,
+ method: "b",
+ }
+
+ if countToken := arguments.MatchType(TokenNumber); countToken != nil {
+ loremNode.count = AsValue(countToken.Val).Integer()
+ }
+
+ if methodToken := arguments.MatchType(TokenIdentifier); methodToken != nil {
+ if methodToken.Val != "w" && methodToken.Val != "p" && methodToken.Val != "b" {
+ return nil, arguments.Error("lorem-method must be either 'w', 'p' or 'b'.", nil)
+ }
+
+ loremNode.method = methodToken.Val
+ }
+
+ if arguments.MatchOne(TokenIdentifier, "random") != nil {
+ loremNode.random = true
+ }
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed lorem-tag arguments.", nil)
+ }
+
+ return loremNode, nil
+}
+
+func init() {
+ rand.Seed(time.Now().Unix())
+
+ RegisterTag("lorem", tagLoremParser)
+}
+
+const tagLoremText = `Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat. Quis aute iure reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint obcaecat cupiditat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
+Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi. Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat.
+Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat. Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi.
+Nam liber tempor cum soluta nobis eleifend option congue nihil imperdiet doming id quod mazim placerat facer possim assum. Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat. Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat.
+Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis.
+At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, At accusam aliquyam diam diam dolore dolores duo eirmod eos erat, et nonumy sed tempor et et invidunt justo labore Stet clita ea et gubergren, kasd magna no rebum. sanctus sea sed takimata ut vero voluptua. est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat.
+Consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.`
diff --git a/vendor/github.com/flosch/pongo2/tags_macro.go b/vendor/github.com/flosch/pongo2/tags_macro.go
new file mode 100644
index 00000000..dd3e0bf4
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_macro.go
@@ -0,0 +1,149 @@
+package pongo2
+
+import (
+ "bytes"
+ "fmt"
+)
+
+type tagMacroNode struct {
+ position *Token
+ name string
+ argsOrder []string
+ args map[string]IEvaluator
+ exported bool
+
+ wrapper *NodeWrapper
+}
+
+func (node *tagMacroNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ ctx.Private[node.name] = func(args ...*Value) *Value {
+ return node.call(ctx, args...)
+ }
+
+ return nil
+}
+
+func (node *tagMacroNode) call(ctx *ExecutionContext, args ...*Value) *Value {
+ argsCtx := make(Context)
+
+ for k, v := range node.args {
+ if v == nil {
+ // User did not provide a default value
+ argsCtx[k] = nil
+ } else {
+ // Evaluate the default value
+ valueExpr, err := v.Evaluate(ctx)
+ if err != nil {
+ ctx.Logf(err.Error())
+ return AsSafeValue(err.Error())
+ }
+
+ argsCtx[k] = valueExpr
+ }
+ }
+
+ if len(args) > len(node.argsOrder) {
+ // Too many arguments; we're ignoring them and just logging in debug mode.
+ err := ctx.Error(fmt.Sprintf("Macro '%s' called with too many arguments (%d instead of %d).",
+ node.name, len(args), len(node.argsOrder)), nil).updateFromTokenIfNeeded(ctx.template, node.position)
+
+ ctx.Logf(err.Error()) // TODO: This is a workaround, because the error is not returned yet to the Execution()-methods
+ return AsSafeValue(err.Error())
+ }
+
+ // Make a context for the macro execution
+ macroCtx := NewChildExecutionContext(ctx)
+
+ // Register all arguments in the private context
+ macroCtx.Private.Update(argsCtx)
+
+ for idx, argValue := range args {
+ macroCtx.Private[node.argsOrder[idx]] = argValue.Interface()
+ }
+
+ var b bytes.Buffer
+ err := node.wrapper.Execute(macroCtx, &b)
+ if err != nil {
+ return AsSafeValue(err.updateFromTokenIfNeeded(ctx.template, node.position).Error())
+ }
+
+ return AsSafeValue(b.String())
+}
+
+func tagMacroParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ macroNode := &tagMacroNode{
+ position: start,
+ args: make(map[string]IEvaluator),
+ }
+
+ nameToken := arguments.MatchType(TokenIdentifier)
+ if nameToken == nil {
+ return nil, arguments.Error("Macro-tag needs at least an identifier as name.", nil)
+ }
+ macroNode.name = nameToken.Val
+
+ if arguments.MatchOne(TokenSymbol, "(") == nil {
+ return nil, arguments.Error("Expected '('.", nil)
+ }
+
+ for arguments.Match(TokenSymbol, ")") == nil {
+ argNameToken := arguments.MatchType(TokenIdentifier)
+ if argNameToken == nil {
+ return nil, arguments.Error("Expected argument name as identifier.", nil)
+ }
+ macroNode.argsOrder = append(macroNode.argsOrder, argNameToken.Val)
+
+ if arguments.Match(TokenSymbol, "=") != nil {
+ // Default expression follows
+ argDefaultExpr, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ macroNode.args[argNameToken.Val] = argDefaultExpr
+ } else {
+ // No default expression
+ macroNode.args[argNameToken.Val] = nil
+ }
+
+ if arguments.Match(TokenSymbol, ")") != nil {
+ break
+ }
+ if arguments.Match(TokenSymbol, ",") == nil {
+ return nil, arguments.Error("Expected ',' or ')'.", nil)
+ }
+ }
+
+ if arguments.Match(TokenKeyword, "export") != nil {
+ macroNode.exported = true
+ }
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed macro-tag.", nil)
+ }
+
+ // Body wrapping
+ wrapper, endargs, err := doc.WrapUntilTag("endmacro")
+ if err != nil {
+ return nil, err
+ }
+ macroNode.wrapper = wrapper
+
+ if endargs.Count() > 0 {
+ return nil, endargs.Error("Arguments not allowed here.", nil)
+ }
+
+ if macroNode.exported {
+ // Now register the macro if it wants to be exported
+ _, has := doc.template.exportedMacros[macroNode.name]
+ if has {
+ return nil, doc.Error(fmt.Sprintf("another macro with name '%s' already exported", macroNode.name), start)
+ }
+ doc.template.exportedMacros[macroNode.name] = macroNode
+ }
+
+ return macroNode, nil
+}
+
+func init() {
+ RegisterTag("macro", tagMacroParser)
+}
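Because a macro registers itself as a callable in the private context (see call() above), invoking it reads like a plain function call in an output tag. A small illustrative sketch; the macro name, default value and arguments are made up:

package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	set := pongo2.NewSet("macro-example", pongo2.MustNewLocalFileSystemLoader(""))

	// The default declared in the macro signature is used when the caller omits the argument.
	tpl, err := set.FromString(`{% macro greet(name="world") %}Hello {{ name }}!{% endmacro %}{{ greet() }} {{ greet("pongo2") }}`)
	if err != nil {
		panic(err)
	}

	out, err := tpl.Execute(nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // Hello world! Hello pongo2!
}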
diff --git a/vendor/github.com/flosch/pongo2/tags_now.go b/vendor/github.com/flosch/pongo2/tags_now.go
new file mode 100644
index 00000000..d9fa4a37
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_now.go
@@ -0,0 +1,50 @@
+package pongo2
+
+import (
+ "time"
+)
+
+type tagNowNode struct {
+ position *Token
+ format string
+ fake bool
+}
+
+func (node *tagNowNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ var t time.Time
+ if node.fake {
+ t = time.Date(2014, time.February, 05, 18, 31, 45, 00, time.UTC)
+ } else {
+ t = time.Now()
+ }
+
+ writer.WriteString(t.Format(node.format))
+
+ return nil
+}
+
+func tagNowParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ nowNode := &tagNowNode{
+ position: start,
+ }
+
+ formatToken := arguments.MatchType(TokenString)
+ if formatToken == nil {
+ return nil, arguments.Error("Expected a format string.", nil)
+ }
+ nowNode.format = formatToken.Val
+
+ if arguments.MatchOne(TokenIdentifier, "fake") != nil {
+ nowNode.fake = true
+ }
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed now-tag arguments.", nil)
+ }
+
+ return nowNode, nil
+}
+
+func init() {
+ RegisterTag("now", tagNowParser)
+}
diff --git a/vendor/github.com/flosch/pongo2/tags_set.go b/vendor/github.com/flosch/pongo2/tags_set.go
new file mode 100644
index 00000000..be121c12
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_set.go
@@ -0,0 +1,50 @@
+package pongo2
+
+type tagSetNode struct {
+ name string
+ expression IEvaluator
+}
+
+func (node *tagSetNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ // Evaluate expression
+ value, err := node.expression.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+
+ ctx.Private[node.name] = value
+ return nil
+}
+
+func tagSetParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ node := &tagSetNode{}
+
+ // Parse variable name
+ typeToken := arguments.MatchType(TokenIdentifier)
+ if typeToken == nil {
+ return nil, arguments.Error("Expected an identifier.", nil)
+ }
+ node.name = typeToken.Val
+
+ if arguments.Match(TokenSymbol, "=") == nil {
+ return nil, arguments.Error("Expected '='.", nil)
+ }
+
+ // Variable expression
+ keyExpression, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ node.expression = keyExpression
+
+ // Remaining arguments
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed 'set'-tag arguments.", nil)
+ }
+
+ return node, nil
+}
+
+func init() {
+ RegisterTag("set", tagSetParser)
+}
diff --git a/vendor/github.com/flosch/pongo2/tags_spaceless.go b/vendor/github.com/flosch/pongo2/tags_spaceless.go
new file mode 100644
index 00000000..4fa851ba
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_spaceless.go
@@ -0,0 +1,54 @@
+package pongo2
+
+import (
+ "bytes"
+ "regexp"
+)
+
+type tagSpacelessNode struct {
+ wrapper *NodeWrapper
+}
+
+var tagSpacelessRegexp = regexp.MustCompile(`(?U:(<.*>))([\t\n\v\f\r ]+)(?U:(<.*>))`)
+
+func (node *tagSpacelessNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ b := bytes.NewBuffer(make([]byte, 0, 1024)) // 1 KiB
+
+ err := node.wrapper.Execute(ctx, b)
+ if err != nil {
+ return err
+ }
+
+ s := b.String()
+ // Repeat this recursively
+ changed := true
+ for changed {
+ s2 := tagSpacelessRegexp.ReplaceAllString(s, "$1$3")
+ changed = s != s2
+ s = s2
+ }
+
+ writer.WriteString(s)
+
+ return nil
+}
+
+func tagSpacelessParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ spacelessNode := &tagSpacelessNode{}
+
+ wrapper, _, err := doc.WrapUntilTag("endspaceless")
+ if err != nil {
+ return nil, err
+ }
+ spacelessNode.wrapper = wrapper
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed spaceless-tag arguments.", nil)
+ }
+
+ return spacelessNode, nil
+}
+
+func init() {
+ RegisterTag("spaceless", tagSpacelessParser)
+}
diff --git a/vendor/github.com/flosch/pongo2/tags_ssi.go b/vendor/github.com/flosch/pongo2/tags_ssi.go
new file mode 100644
index 00000000..c33858d5
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_ssi.go
@@ -0,0 +1,68 @@
+package pongo2
+
+import (
+ "io/ioutil"
+)
+
+type tagSSINode struct {
+ filename string
+ content string
+ template *Template
+}
+
+func (node *tagSSINode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ if node.template != nil {
+ // Execute the template within the current context
+ includeCtx := make(Context)
+ includeCtx.Update(ctx.Public)
+ includeCtx.Update(ctx.Private)
+
+ err := node.template.execute(includeCtx, writer)
+ if err != nil {
+ return err.(*Error)
+ }
+ } else {
+ // Just print out the content
+ writer.WriteString(node.content)
+ }
+ return nil
+}
+
+func tagSSIParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ SSINode := &tagSSINode{}
+
+ if fileToken := arguments.MatchType(TokenString); fileToken != nil {
+ SSINode.filename = fileToken.Val
+
+ if arguments.Match(TokenIdentifier, "parsed") != nil {
+ // parsed
+ temporaryTpl, err := doc.template.set.FromFile(doc.template.set.resolveFilename(doc.template, fileToken.Val))
+ if err != nil {
+ return nil, err.(*Error).updateFromTokenIfNeeded(doc.template, fileToken)
+ }
+ SSINode.template = temporaryTpl
+ } else {
+ // plaintext
+ buf, err := ioutil.ReadFile(doc.template.set.resolveFilename(doc.template, fileToken.Val))
+ if err != nil {
+ return nil, (&Error{
+ Sender: "tag:ssi",
+ OrigError: err,
+ }).updateFromTokenIfNeeded(doc.template, fileToken)
+ }
+ SSINode.content = string(buf)
+ }
+ } else {
+ return nil, arguments.Error("First argument must be a string.", nil)
+ }
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed SSI-tag argument.", nil)
+ }
+
+ return SSINode, nil
+}
+
+func init() {
+ RegisterTag("ssi", tagSSIParser)
+}
diff --git a/vendor/github.com/flosch/pongo2/tags_templatetag.go b/vendor/github.com/flosch/pongo2/tags_templatetag.go
new file mode 100644
index 00000000..164b4dc3
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_templatetag.go
@@ -0,0 +1,45 @@
+package pongo2
+
+type tagTemplateTagNode struct {
+ content string
+}
+
+var templateTagMapping = map[string]string{
+ "openblock": "{%",
+ "closeblock": "%}",
+ "openvariable": "{{",
+ "closevariable": "}}",
+ "openbrace": "{",
+ "closebrace": "}",
+ "opencomment": "{#",
+ "closecomment": "#}",
+}
+
+func (node *tagTemplateTagNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ writer.WriteString(node.content)
+ return nil
+}
+
+func tagTemplateTagParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ ttNode := &tagTemplateTagNode{}
+
+ if argToken := arguments.MatchType(TokenIdentifier); argToken != nil {
+ output, found := templateTagMapping[argToken.Val]
+ if !found {
+ return nil, arguments.Error("Argument not found", argToken)
+ }
+ ttNode.content = output
+ } else {
+ return nil, arguments.Error("Identifier expected.", nil)
+ }
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed templatetag-tag argument.", nil)
+ }
+
+ return ttNode, nil
+}
+
+func init() {
+ RegisterTag("templatetag", tagTemplateTagParser)
+}
diff --git a/vendor/github.com/flosch/pongo2/tags_widthratio.go b/vendor/github.com/flosch/pongo2/tags_widthratio.go
new file mode 100644
index 00000000..70c9c3e8
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_widthratio.go
@@ -0,0 +1,83 @@
+package pongo2
+
+import (
+ "fmt"
+ "math"
+)
+
+type tagWidthratioNode struct {
+ position *Token
+ current, max IEvaluator
+ width IEvaluator
+ ctxName string
+}
+
+func (node *tagWidthratioNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ current, err := node.current.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+
+ max, err := node.max.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+
+ width, err := node.width.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+
+ value := int(math.Ceil(current.Float()/max.Float()*width.Float() + 0.5))
+
+ if node.ctxName == "" {
+ writer.WriteString(fmt.Sprintf("%d", value))
+ } else {
+ ctx.Private[node.ctxName] = value
+ }
+
+ return nil
+}
+
+func tagWidthratioParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ widthratioNode := &tagWidthratioNode{
+ position: start,
+ }
+
+ current, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ widthratioNode.current = current
+
+ max, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ widthratioNode.max = max
+
+ width, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ widthratioNode.width = width
+
+ if arguments.MatchOne(TokenKeyword, "as") != nil {
+ // Name follows
+ nameToken := arguments.MatchType(TokenIdentifier)
+ if nameToken == nil {
+ return nil, arguments.Error("Expected name (identifier).", nil)
+ }
+ widthratioNode.ctxName = nameToken.Val
+ }
+
+ if arguments.Remaining() > 0 {
+ return nil, arguments.Error("Malformed widthratio-tag arguments.", nil)
+ }
+
+ return widthratioNode, nil
+}
+
+func init() {
+ RegisterTag("widthratio", tagWidthratioParser)
+}
diff --git a/vendor/github.com/flosch/pongo2/tags_with.go b/vendor/github.com/flosch/pongo2/tags_with.go
new file mode 100644
index 00000000..32b3c1c4
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/tags_with.go
@@ -0,0 +1,88 @@
+package pongo2
+
+type tagWithNode struct {
+ withPairs map[string]IEvaluator
+ wrapper *NodeWrapper
+}
+
+func (node *tagWithNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ // new context for the block
+ withctx := NewChildExecutionContext(ctx)
+
+ // Put all custom with-pairs into the context
+ for key, value := range node.withPairs {
+ val, err := value.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ withctx.Private[key] = val
+ }
+
+ return node.wrapper.Execute(withctx, writer)
+}
+
+func tagWithParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
+ withNode := &tagWithNode{
+ withPairs: make(map[string]IEvaluator),
+ }
+
+ if arguments.Count() == 0 {
+ return nil, arguments.Error("Tag 'with' requires at least one argument.", nil)
+ }
+
+ wrapper, endargs, err := doc.WrapUntilTag("endwith")
+ if err != nil {
+ return nil, err
+ }
+ withNode.wrapper = wrapper
+
+ if endargs.Count() > 0 {
+ return nil, endargs.Error("Arguments not allowed here.", nil)
+ }
+
+ // Scan through all arguments to see which style the user uses (old or new style).
+ // If we find any "as" keyword we will enforce old style; otherwise we will use new style.
+ oldStyle := false // by default we're using the new_style
+ for i := 0; i < arguments.Count(); i++ {
+ if arguments.PeekN(i, TokenKeyword, "as") != nil {
+ oldStyle = true
+ break
+ }
+ }
+
+ for arguments.Remaining() > 0 {
+ if oldStyle {
+ valueExpr, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ if arguments.Match(TokenKeyword, "as") == nil {
+ return nil, arguments.Error("Expected 'as' keyword.", nil)
+ }
+ keyToken := arguments.MatchType(TokenIdentifier)
+ if keyToken == nil {
+ return nil, arguments.Error("Expected an identifier", nil)
+ }
+ withNode.withPairs[keyToken.Val] = valueExpr
+ } else {
+ keyToken := arguments.MatchType(TokenIdentifier)
+ if keyToken == nil {
+ return nil, arguments.Error("Expected an identifier", nil)
+ }
+ if arguments.Match(TokenSymbol, "=") == nil {
+ return nil, arguments.Error("Expected '='.", nil)
+ }
+ valueExpr, err := arguments.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ withNode.withPairs[keyToken.Val] = valueExpr
+ }
+ }
+
+ return withNode, nil
+}
+
+func init() {
+ RegisterTag("with", tagWithParser)
+}
diff --git a/vendor/github.com/flosch/pongo2/template.go b/vendor/github.com/flosch/pongo2/template.go
new file mode 100644
index 00000000..47666c94
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/template.go
@@ -0,0 +1,276 @@
+package pongo2
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+)
+
+type TemplateWriter interface {
+ io.Writer
+ WriteString(string) (int, error)
+}
+
+type templateWriter struct {
+ w io.Writer
+}
+
+func (tw *templateWriter) WriteString(s string) (int, error) {
+ return tw.w.Write([]byte(s))
+}
+
+func (tw *templateWriter) Write(b []byte) (int, error) {
+ return tw.w.Write(b)
+}
+
+type Template struct {
+ set *TemplateSet
+
+ // Input
+ isTplString bool
+ name string
+ tpl string
+ size int
+
+ // Calculation
+ tokens []*Token
+ parser *Parser
+
+ // first come, first serve (it's important to not override existing entries in here)
+ level int
+ parent *Template
+ child *Template
+ blocks map[string]*NodeWrapper
+ exportedMacros map[string]*tagMacroNode
+
+ // Output
+ root *nodeDocument
+
+ // Options allow you to change the behavior of template-engine.
+ // You can change the options before calling the Execute method.
+ Options *Options
+}
+
+func newTemplateString(set *TemplateSet, tpl []byte) (*Template, error) {
+ return newTemplate(set, "", true, tpl)
+}
+
+func newTemplate(set *TemplateSet, name string, isTplString bool, tpl []byte) (*Template, error) {
+ strTpl := string(tpl)
+
+ // Create the template
+ t := &Template{
+ set: set,
+ isTplString: isTplString,
+ name: name,
+ tpl: strTpl,
+ size: len(strTpl),
+ blocks: make(map[string]*NodeWrapper),
+ exportedMacros: make(map[string]*tagMacroNode),
+ Options: newOptions(),
+ }
+ // Copy all settings from another Options.
+ t.Options.Update(set.Options)
+
+ // Tokenize it
+ tokens, err := lex(name, strTpl)
+ if err != nil {
+ return nil, err
+ }
+ t.tokens = tokens
+
+ // For debugging purposes, show all tokens:
+ /*for i, t := range tokens {
+ fmt.Printf("%3d. %s\n", i, t)
+ }*/
+
+ // Parse it
+ err = t.parse()
+ if err != nil {
+ return nil, err
+ }
+
+ return t, nil
+}
+
+func (tpl *Template) newContextForExecution(context Context) (*Template, *ExecutionContext, error) {
+ if tpl.Options.TrimBlocks || tpl.Options.LStripBlocks {
+ // Issue #94 https://github.com/flosch/pongo2/issues/94
+ // If an application configures pongo2 template to trim_blocks,
+ // the first newline after a template tag is removed automatically (like in PHP).
+ prev := &Token{
+ Typ: TokenHTML,
+ Val: "\n",
+ }
+
+ for _, t := range tpl.tokens {
+ if tpl.Options.LStripBlocks {
+ if prev.Typ == TokenHTML && t.Typ != TokenHTML && t.Val == "{%" {
+ prev.Val = strings.TrimRight(prev.Val, "\t ")
+ }
+ }
+
+ if tpl.Options.TrimBlocks {
+ if prev.Typ != TokenHTML && t.Typ == TokenHTML && prev.Val == "%}" {
+ if len(t.Val) > 0 && t.Val[0] == '\n' {
+ t.Val = t.Val[1:len(t.Val)]
+ }
+ }
+ }
+
+ prev = t
+ }
+ }
+
+ // Determine the parent to be executed (for template inheritance)
+ parent := tpl
+ for parent.parent != nil {
+ parent = parent.parent
+ }
+
+ // Create context if none is given
+ newContext := make(Context)
+ newContext.Update(tpl.set.Globals)
+
+ if context != nil {
+ newContext.Update(context)
+
+ if len(newContext) > 0 {
+ // Check for context name syntax
+ err := newContext.checkForValidIdentifiers()
+ if err != nil {
+ return parent, nil, err
+ }
+
+ // Check for clashes with macro names
+ for k := range newContext {
+ _, has := tpl.exportedMacros[k]
+ if has {
+ return parent, nil, &Error{
+ Filename: tpl.name,
+ Sender: "execution",
+ OrigError: fmt.Errorf("context key name '%s' clashes with macro '%s'", k, k),
+ }
+ }
+ }
+ }
+ }
+
+ // Create operational context
+ ctx := newExecutionContext(parent, newContext)
+
+ return parent, ctx, nil
+}
+
+func (tpl *Template) execute(context Context, writer TemplateWriter) error {
+ parent, ctx, err := tpl.newContextForExecution(context)
+ if err != nil {
+ return err
+ }
+
+ // Run the selected document
+ if err := parent.root.Execute(ctx, writer); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (tpl *Template) newTemplateWriterAndExecute(context Context, writer io.Writer) error {
+ return tpl.execute(context, &templateWriter{w: writer})
+}
+
+func (tpl *Template) newBufferAndExecute(context Context) (*bytes.Buffer, error) {
+ // Create output buffer
+ // We assume that the rendered template will be 30% larger
+ buffer := bytes.NewBuffer(make([]byte, 0, int(float64(tpl.size)*1.3)))
+ if err := tpl.execute(context, buffer); err != nil {
+ return nil, err
+ }
+ return buffer, nil
+}
+
+// Executes the template with the given context and writes to writer (io.Writer)
+// on success. Context can be nil. Nothing is written on error; instead, the error
+// is returned.
+func (tpl *Template) ExecuteWriter(context Context, writer io.Writer) error {
+ buf, err := tpl.newBufferAndExecute(context)
+ if err != nil {
+ return err
+ }
+ _, err = buf.WriteTo(writer)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// Same as ExecuteWriter. The only difference between both functions is that
+// this function might already have written parts of the generated template in the
+// case of an execution error because there's no intermediate buffer involved for
+// performance reasons. This is handy if you need high performance template
+// generation or if you want to manage your own pool of buffers.
+func (tpl *Template) ExecuteWriterUnbuffered(context Context, writer io.Writer) error {
+ return tpl.newTemplateWriterAndExecute(context, writer)
+}
+
+// Executes the template and returns the rendered template as a []byte
+func (tpl *Template) ExecuteBytes(context Context) ([]byte, error) {
+ // Execute template
+ buffer, err := tpl.newBufferAndExecute(context)
+ if err != nil {
+ return nil, err
+ }
+ return buffer.Bytes(), nil
+}
+
+// Executes the template and returns the rendered template as a string
+func (tpl *Template) Execute(context Context) (string, error) {
+ // Execute template
+ buffer, err := tpl.newBufferAndExecute(context)
+ if err != nil {
+ return "", err
+ }
+
+ return buffer.String(), nil
+
+}
+
+func (tpl *Template) ExecuteBlocks(context Context, blocks []string) (map[string]string, error) {
+ var parents []*Template
+ result := make(map[string]string)
+
+ parent := tpl
+ for parent != nil {
+ parents = append(parents, parent)
+ parent = parent.parent
+ }
+
+ for _, t := range parents {
+ buffer := bytes.NewBuffer(make([]byte, 0, int(float64(t.size)*1.3)))
+ _, ctx, err := t.newContextForExecution(context)
+ if err != nil {
+ return nil, err
+ }
+ for _, blockName := range blocks {
+ if _, ok := result[blockName]; ok {
+ continue
+ }
+ if blockWrapper, ok := t.blocks[blockName]; ok {
+ bErr := blockWrapper.Execute(ctx, buffer)
+ if bErr != nil {
+ return nil, bErr
+ }
+ result[blockName] = buffer.String()
+ buffer.Reset()
+ }
+ }
+ // We have found all blocks
+ if len(blocks) == len(result) {
+ break
+ }
+ }
+
+ return result, nil
+}
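The execution entry points above differ mainly in buffering: ExecuteWriter renders into an internal buffer first, so nothing reaches the writer on error, while ExecuteWriterUnbuffered streams directly and may leave partial output behind on failure. A brief sketch of both, with an illustrative template string:

package main

import (
	"bytes"
	"fmt"
	"os"

	"github.com/flosch/pongo2"
)

func main() {
	set := pongo2.NewSet("exec-example", pongo2.MustNewLocalFileSystemLoader(""))

	tpl, err := set.FromString("Hello {{ name }}!\n")
	if err != nil {
		panic(err)
	}

	// Buffered: nothing reaches the writer unless rendering succeeded.
	var buf bytes.Buffer
	if err := tpl.ExecuteWriter(pongo2.Context{"name": "buffered"}, &buf); err != nil {
		panic(err)
	}
	fmt.Print(buf.String())

	// Unbuffered: faster, but partial output is possible if execution fails midway.
	if err := tpl.ExecuteWriterUnbuffered(pongo2.Context{"name": "streamed"}, os.Stdout); err != nil {
		panic(err)
	}
}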
diff --git a/vendor/github.com/flosch/pongo2/template_loader.go b/vendor/github.com/flosch/pongo2/template_loader.go
new file mode 100644
index 00000000..abd23409
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/template_loader.go
@@ -0,0 +1,156 @@
+package pongo2
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "path/filepath"
+)
+
+// LocalFilesystemLoader represents a local filesystem loader with basic
+// BaseDirectory capabilities. The access to the local filesystem is unrestricted.
+type LocalFilesystemLoader struct {
+ baseDir string
+}
+
+// MustNewLocalFileSystemLoader creates a new LocalFilesystemLoader instance
+// and panics if there's any error during instantiation. The parameters
+// are the same like NewLocalFileSystemLoader.
+func MustNewLocalFileSystemLoader(baseDir string) *LocalFilesystemLoader {
+ fs, err := NewLocalFileSystemLoader(baseDir)
+ if err != nil {
+ log.Panic(err)
+ }
+ return fs
+}
+
+// NewLocalFileSystemLoader creates a new LocalFilesystemLoader and allows
+// templates to be loaded from disk (unrestricted). If a base directory
+// is given (or set later using SetBaseDir), it is used
+// for path calculation in template inclusions/imports. Otherwise the path
+// is calculated relative to the including template's path.
+func NewLocalFileSystemLoader(baseDir string) (*LocalFilesystemLoader, error) {
+ fs := &LocalFilesystemLoader{}
+ if baseDir != "" {
+ if err := fs.SetBaseDir(baseDir); err != nil {
+ return nil, err
+ }
+ }
+ return fs, nil
+}
+
+// SetBaseDir sets the template's base directory. This directory will
+// be used for any relative path in filters, tags and From*-functions to determine
+// your template. See the comment for NewLocalFileSystemLoader as well.
+func (fs *LocalFilesystemLoader) SetBaseDir(path string) error {
+ // Make the path absolute
+ if !filepath.IsAbs(path) {
+ abs, err := filepath.Abs(path)
+ if err != nil {
+ return err
+ }
+ path = abs
+ }
+
+ // Check for existence
+ fi, err := os.Stat(path)
+ if err != nil {
+ return err
+ }
+ if !fi.IsDir() {
+ return fmt.Errorf("The given path '%s' is not a directory.", path)
+ }
+
+ fs.baseDir = path
+ return nil
+}
+
+// Get reads the path's content from your local filesystem.
+func (fs *LocalFilesystemLoader) Get(path string) (io.Reader, error) {
+ buf, err := ioutil.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+ return bytes.NewReader(buf), nil
+}
+
+// Abs resolves a filename relative to the base directory. Absolute paths are allowed.
+// When there's no base dir set, the absolute path to the filename
+// will be calculated based on either the provided base directory (which
+// might be a path of a template which includes another template) or
+// the current working directory.
+func (fs *LocalFilesystemLoader) Abs(base, name string) string {
+ if filepath.IsAbs(name) {
+ return name
+ }
+
+ // Our own base dir has always priority; if there's none
+ // we use the path provided in base.
+ var err error
+ if fs.baseDir == "" {
+ if base == "" {
+ base, err = os.Getwd()
+ if err != nil {
+ panic(err)
+ }
+ return filepath.Join(base, name)
+ }
+
+ return filepath.Join(filepath.Dir(base), name)
+ }
+
+ return filepath.Join(fs.baseDir, name)
+}
+
+// SandboxedFilesystemLoader is still WIP.
+type SandboxedFilesystemLoader struct {
+ *LocalFilesystemLoader
+}
+
+// NewSandboxedFilesystemLoader creates a new sandboxed local file system instance.
+func NewSandboxedFilesystemLoader(baseDir string) (*SandboxedFilesystemLoader, error) {
+ fs, err := NewLocalFileSystemLoader(baseDir)
+ if err != nil {
+ return nil, err
+ }
+ return &SandboxedFilesystemLoader{
+ LocalFilesystemLoader: fs,
+ }, nil
+}
+
+// Move sandbox to a virtual fs
+
+/*
+if len(set.SandboxDirectories) > 0 {
+ defer func() {
+ // Remove any ".." or other crap
+ resolvedPath = filepath.Clean(resolvedPath)
+
+ // Make the path absolute
+ absPath, err := filepath.Abs(resolvedPath)
+ if err != nil {
+ panic(err)
+ }
+ resolvedPath = absPath
+
+ // Check against the sandbox directories (once one pattern matches, we're done and can allow it)
+ for _, pattern := range set.SandboxDirectories {
+ matched, err := filepath.Match(pattern, resolvedPath)
+ if err != nil {
+ panic("Wrong sandbox directory match pattern (see http://golang.org/pkg/path/filepath/#Match).")
+ }
+ if matched {
+ // OK!
+ return
+ }
+ }
+
+ // No pattern matched, we have to log+deny the request
+ set.logf("Access attempt outside of the sandbox directories (blocked): '%s'", resolvedPath)
+ resolvedPath = ""
+ }()
+}
+*/
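Tying the loader into a set: LocalFilesystemLoader resolves relative template paths against its base directory, and FromCache (defined in template_sets.go below) compiles each file only once unless Debug is set. A sketch that assumes a hypothetical ./templates/index.html exists:

package main

import (
	"os"

	"github.com/flosch/pongo2"
)

func main() {
	// Hypothetical layout: ./templates/index.html next to the binary.
	loader, err := pongo2.NewLocalFileSystemLoader("templates")
	if err != nil {
		panic(err)
	}
	set := pongo2.NewSet("web", loader)

	// Compiled once and cached; setting set.Debug = true would recompile on every call.
	tpl, err := set.FromCache("index.html")
	if err != nil {
		panic(err)
	}

	if err := tpl.ExecuteWriter(pongo2.Context{"title": "home"}, os.Stdout); err != nil {
		panic(err)
	}
}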
diff --git a/vendor/github.com/flosch/pongo2/template_sets.go b/vendor/github.com/flosch/pongo2/template_sets.go
new file mode 100644
index 00000000..4b1e43da
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/template_sets.go
@@ -0,0 +1,305 @@
+package pongo2
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "sync"
+
+ "errors"
+)
+
+// TemplateLoader allows you to implement a virtual file system.
+type TemplateLoader interface {
+ // Abs calculates the path to a given template. Whenever a path must be resolved
+ // due to an import from another template, the base equals the parent template's path.
+ Abs(base, name string) string
+
+ // Get returns an io.Reader where the template's content can be read from.
+ Get(path string) (io.Reader, error)
+}
+
+// TemplateSet allows you to create your own group of templates with their own
+// global context (which is shared among all members of the set) and their own
+// configuration.
+// It's useful for separating different kinds of templates
+// (e.g. web templates vs. mail templates).
+type TemplateSet struct {
+ name string
+ loaders []TemplateLoader
+
+ // Globals will be provided to all templates created within this template set
+ Globals Context
+
+ // If debug is true (default false), ExecutionContext.Logf() will work and output
+ // to STDOUT. Furthermore, FromCache() won't cache the templates.
+ // Make sure to synchronize the access to it in case you're changing this
+ // variable during program execution (and template compilation/execution).
+ Debug bool
+
+ // Options allow you to change the behavior of template-engine.
+ // You can change the options before calling the Execute method.
+ Options *Options
+
+ // Sandbox features
+ // - Disallow access to specific tags and/or filters (using BanTag() and BanFilter())
+ //
+ // For efficiency reasons you can ban tags/filters only *before* you have
+ // added your first template to the set (restrictions are statically checked).
+ // After you've added one, it's not possible anymore (for your own security).
+ firstTemplateCreated bool
+ bannedTags map[string]bool
+ bannedFilters map[string]bool
+
+ // Template cache (for FromCache())
+ templateCache map[string]*Template
+ templateCacheMutex sync.Mutex
+}
+
+// NewSet can be used to create sets with different kinds of templates
+// (e.g. separating web from mail templates), with different globals or
+// other configurations.
+func NewSet(name string, loaders ...TemplateLoader) *TemplateSet {
+ if len(loaders) == 0 {
+ panic(fmt.Errorf("at least one template loader must be specified"))
+ }
+
+ return &TemplateSet{
+ name: name,
+ loaders: loaders,
+ Globals: make(Context),
+ bannedTags: make(map[string]bool),
+ bannedFilters: make(map[string]bool),
+ templateCache: make(map[string]*Template),
+ Options: newOptions(),
+ }
+}
+
+func (set *TemplateSet) AddLoader(loaders ...TemplateLoader) {
+ set.loaders = append(set.loaders, loaders...)
+}
+
+func (set *TemplateSet) resolveFilename(tpl *Template, path string) string {
+ return set.resolveFilenameForLoader(set.loaders[0], tpl, path)
+}
+
+func (set *TemplateSet) resolveFilenameForLoader(loader TemplateLoader, tpl *Template, path string) string {
+ name := ""
+ if tpl != nil && tpl.isTplString {
+ return path
+ }
+ if tpl != nil {
+ name = tpl.name
+ }
+
+ return loader.Abs(name, path)
+}
+
+// BanTag bans a specific tag for this template set. See more in the documentation for TemplateSet.
+func (set *TemplateSet) BanTag(name string) error {
+ _, has := tags[name]
+ if !has {
+ return fmt.Errorf("tag '%s' not found", name)
+ }
+ if set.firstTemplateCreated {
+ return errors.New("you cannot ban any tags after you've added your first template to your template set")
+ }
+ _, has = set.bannedTags[name]
+ if has {
+ return fmt.Errorf("tag '%s' is already banned", name)
+ }
+ set.bannedTags[name] = true
+
+ return nil
+}
+
+// BanFilter bans a specific filter for this template set. See more in the documentation for TemplateSet.
+func (set *TemplateSet) BanFilter(name string) error {
+ _, has := filters[name]
+ if !has {
+ return fmt.Errorf("filter '%s' not found", name)
+ }
+ if set.firstTemplateCreated {
+ return errors.New("you cannot ban any filters after you've added your first template to your template set")
+ }
+ _, has = set.bannedFilters[name]
+ if has {
+ return fmt.Errorf("filter '%s' is already banned", name)
+ }
+ set.bannedFilters[name] = true
+
+ return nil
+}
+
+func (set *TemplateSet) resolveTemplate(tpl *Template, path string) (name string, loader TemplateLoader, fd io.Reader, err error) {
+ // iterate over loaders until we appear to have a valid template
+ for _, loader = range set.loaders {
+ name = set.resolveFilenameForLoader(loader, tpl, path)
+ fd, err = loader.Get(name)
+ if err == nil {
+ return
+ }
+ }
+
+ return path, nil, nil, fmt.Errorf("unable to resolve template")
+}
+
+// CleanCache cleans the template cache. If filenames is not empty,
+// it removes the cached templates for those filenames; otherwise it
+// empties the whole template cache. It is thread-safe.
+func (set *TemplateSet) CleanCache(filenames ...string) {
+ set.templateCacheMutex.Lock()
+ defer set.templateCacheMutex.Unlock()
+
+ if len(filenames) == 0 {
+ set.templateCache = make(map[string]*Template, len(set.templateCache))
+ }
+
+ for _, filename := range filenames {
+ delete(set.templateCache, set.resolveFilename(nil, filename))
+ }
+}
+
+// FromCache is a convenient method to cache templates. It is thread-safe
+// and will only compile the template associated with a filename once.
+// If TemplateSet.Debug is true (for example during development phase),
+// FromCache() will not cache the template and instead recompile it on any
+// call (to make changes to a template live instantaneously).
+func (set *TemplateSet) FromCache(filename string) (*Template, error) {
+ if set.Debug {
+ // Recompile on any request
+ return set.FromFile(filename)
+ }
+ // Cache the template
+ cleanedFilename := set.resolveFilename(nil, filename)
+
+ set.templateCacheMutex.Lock()
+ defer set.templateCacheMutex.Unlock()
+
+ tpl, has := set.templateCache[cleanedFilename]
+
+ // Cache miss
+ if !has {
+ tpl, err := set.FromFile(cleanedFilename)
+ if err != nil {
+ return nil, err
+ }
+ set.templateCache[cleanedFilename] = tpl
+ return tpl, nil
+ }
+
+ // Cache hit
+ return tpl, nil
+}
+
+// FromString loads a template from string and returns a Template instance.
+func (set *TemplateSet) FromString(tpl string) (*Template, error) {
+ set.firstTemplateCreated = true
+
+ return newTemplateString(set, []byte(tpl))
+}
+
+// FromBytes loads a template from bytes and returns a Template instance.
+func (set *TemplateSet) FromBytes(tpl []byte) (*Template, error) {
+ set.firstTemplateCreated = true
+
+ return newTemplateString(set, tpl)
+}
+
+// FromFile loads a template from a filename and returns a Template instance.
+func (set *TemplateSet) FromFile(filename string) (*Template, error) {
+ set.firstTemplateCreated = true
+
+ _, _, fd, err := set.resolveTemplate(nil, filename)
+ if err != nil {
+ return nil, &Error{
+ Filename: filename,
+ Sender: "fromfile",
+ OrigError: err,
+ }
+ }
+ buf, err := ioutil.ReadAll(fd)
+ if err != nil {
+ return nil, &Error{
+ Filename: filename,
+ Sender: "fromfile",
+ OrigError: err,
+ }
+ }
+
+ return newTemplate(set, filename, false, buf)
+}
+
+// RenderTemplateString is a shortcut and renders a template string directly.
+func (set *TemplateSet) RenderTemplateString(s string, ctx Context) (string, error) {
+ set.firstTemplateCreated = true
+
+ tpl := Must(set.FromString(s))
+ result, err := tpl.Execute(ctx)
+ if err != nil {
+ return "", err
+ }
+ return result, nil
+}
+
+// RenderTemplateBytes is a shortcut and renders template bytes directly.
+func (set *TemplateSet) RenderTemplateBytes(b []byte, ctx Context) (string, error) {
+ set.firstTemplateCreated = true
+
+ tpl := Must(set.FromBytes(b))
+ result, err := tpl.Execute(ctx)
+ if err != nil {
+ return "", err
+ }
+ return result, nil
+}
+
+// RenderTemplateFile is a shortcut and renders a template file directly.
+func (set *TemplateSet) RenderTemplateFile(fn string, ctx Context) (string, error) {
+ set.firstTemplateCreated = true
+
+ tpl := Must(set.FromFile(fn))
+ result, err := tpl.Execute(ctx)
+ if err != nil {
+ return "", err
+ }
+ return result, nil
+}
+
+func (set *TemplateSet) logf(format string, args ...interface{}) {
+ if set.Debug {
+ logger.Printf(fmt.Sprintf("[template set: %s] %s", set.name, format), args...)
+ }
+}
+
+// Logging function (internally used)
+func logf(format string, items ...interface{}) {
+ if debug {
+ logger.Printf(format, items...)
+ }
+}
+
+var (
+ debug bool // internal debugging
+ logger = log.New(os.Stdout, "[pongo2] ", log.LstdFlags|log.Lshortfile)
+
+ // DefaultLoader allows the default un-sandboxed access to the local file
+ // system and is used by the DefaultSet.
+ DefaultLoader = MustNewLocalFileSystemLoader("")
+
+ // DefaultSet is a set created for you for convenience reasons.
+ DefaultSet = NewSet("default", DefaultLoader)
+
+ // Methods on the default set
+ FromString = DefaultSet.FromString
+ FromBytes = DefaultSet.FromBytes
+ FromFile = DefaultSet.FromFile
+ FromCache = DefaultSet.FromCache
+ RenderTemplateString = DefaultSet.RenderTemplateString
+ RenderTemplateFile = DefaultSet.RenderTemplateFile
+
+ // Globals for the default set
+ Globals = DefaultSet.Globals
+)
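As an aside on how the vendored template-set API above is typically consumed, here is a minimal usage sketch (not part of the vendored file); the set name "web", the banned "include" tag, and the "templates"/"index.html" paths are illustrative assumptions.

package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	// Create a sandboxed set backed by the local file system loader.
	loader := pongo2.MustNewLocalFileSystemLoader("templates")
	set := pongo2.NewSet("web", loader)

	// Bans must happen before the first template is added to the set.
	if err := set.BanTag("include"); err != nil {
		panic(err)
	}

	// FromCache compiles "index.html" once and reuses it on later calls.
	tpl, err := set.FromCache("index.html")
	if err != nil {
		panic(err)
	}

	out, err := tpl.Execute(pongo2.Context{"name": "world"})
	if err != nil {
		panic(err)
	}
	fmt.Println(out)
}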
diff --git a/vendor/github.com/flosch/pongo2/value.go b/vendor/github.com/flosch/pongo2/value.go
new file mode 100644
index 00000000..8b49adb7
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/value.go
@@ -0,0 +1,540 @@
+package pongo2
+
+import (
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type Value struct {
+ val reflect.Value
+ safe bool // used to indicate whether a Value needs explicit escaping in the template
+}
+
+// AsValue converts any given value to a pongo2.Value.
+// It is usually used within your own functions passed to a template
+// through a Context or within filter functions.
+//
+// Example:
+// AsValue("my string")
+func AsValue(i interface{}) *Value {
+ return &Value{
+ val: reflect.ValueOf(i),
+ }
+}
+
+// AsSafeValue works like AsValue, but does not apply the 'escape' filter.
+func AsSafeValue(i interface{}) *Value {
+ return &Value{
+ val: reflect.ValueOf(i),
+ safe: true,
+ }
+}
+
+func (v *Value) getResolvedValue() reflect.Value {
+ if v.val.IsValid() && v.val.Kind() == reflect.Ptr {
+ return v.val.Elem()
+ }
+ return v.val
+}
+
+// IsString checks whether the underlying value is a string
+func (v *Value) IsString() bool {
+ return v.getResolvedValue().Kind() == reflect.String
+}
+
+// IsBool checks whether the underlying value is a bool
+func (v *Value) IsBool() bool {
+ return v.getResolvedValue().Kind() == reflect.Bool
+}
+
+// IsFloat checks whether the underlying value is a float
+func (v *Value) IsFloat() bool {
+ return v.getResolvedValue().Kind() == reflect.Float32 ||
+ v.getResolvedValue().Kind() == reflect.Float64
+}
+
+// IsInteger checks whether the underlying value is an integer
+func (v *Value) IsInteger() bool {
+ return v.getResolvedValue().Kind() == reflect.Int ||
+ v.getResolvedValue().Kind() == reflect.Int8 ||
+ v.getResolvedValue().Kind() == reflect.Int16 ||
+ v.getResolvedValue().Kind() == reflect.Int32 ||
+ v.getResolvedValue().Kind() == reflect.Int64 ||
+ v.getResolvedValue().Kind() == reflect.Uint ||
+ v.getResolvedValue().Kind() == reflect.Uint8 ||
+ v.getResolvedValue().Kind() == reflect.Uint16 ||
+ v.getResolvedValue().Kind() == reflect.Uint32 ||
+ v.getResolvedValue().Kind() == reflect.Uint64
+}
+
+// IsNumber checks whether the underlying value is either an integer
+// or a float.
+func (v *Value) IsNumber() bool {
+ return v.IsInteger() || v.IsFloat()
+}
+
+// IsTime checks whether the underlying value is a time.Time.
+func (v *Value) IsTime() bool {
+ _, ok := v.Interface().(time.Time)
+ return ok
+}
+
+// IsNil checks whether the underlying value is NIL
+func (v *Value) IsNil() bool {
+ //fmt.Printf("%+v\n", v.getResolvedValue().Type().String())
+ return !v.getResolvedValue().IsValid()
+}
+
+// String returns a string for the underlying value. If this value is not
+// of type string, pongo2 tries to convert it. Currently the following
+// types for underlying values are supported:
+//
+// 1. string
+// 2. int/uint (any size)
+// 3. float (any precision)
+// 4. bool
+// 5. time.Time
+// 6. String() will be called on the underlying value if provided
+//
+// NIL values lead to an empty string. Unsupported types lead
+// to their respective type name.
+func (v *Value) String() string {
+ if v.IsNil() {
+ return ""
+ }
+
+ switch v.getResolvedValue().Kind() {
+ case reflect.String:
+ return v.getResolvedValue().String()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return strconv.FormatInt(v.getResolvedValue().Int(), 10)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return strconv.FormatUint(v.getResolvedValue().Uint(), 10)
+ case reflect.Float32, reflect.Float64:
+ return fmt.Sprintf("%f", v.getResolvedValue().Float())
+ case reflect.Bool:
+ if v.Bool() {
+ return "True"
+ }
+ return "False"
+ case reflect.Struct:
+ if t, ok := v.Interface().(fmt.Stringer); ok {
+ return t.String()
+ }
+ }
+
+ logf("Value.String() not implemented for type: %s\n", v.getResolvedValue().Kind().String())
+ return v.getResolvedValue().String()
+}
+
+// Integer returns the underlying value as an integer (converts the underlying
+// value, if necessary). If it's not possible to convert the underlying value,
+// it will return 0.
+func (v *Value) Integer() int {
+ switch v.getResolvedValue().Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return int(v.getResolvedValue().Int())
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return int(v.getResolvedValue().Uint())
+ case reflect.Float32, reflect.Float64:
+ return int(v.getResolvedValue().Float())
+ case reflect.String:
+ // Try to convert from string to int (parsed as a base-10 float first)
+ f, err := strconv.ParseFloat(v.getResolvedValue().String(), 64)
+ if err != nil {
+ return 0
+ }
+ return int(f)
+ default:
+ logf("Value.Integer() not available for type: %s\n", v.getResolvedValue().Kind().String())
+ return 0
+ }
+}
+
+// Float returns the underlying value as a float (converts the underlying
+// value, if necessary). If it's not possible to convert the underlying value,
+// it will return 0.0.
+func (v *Value) Float() float64 {
+ switch v.getResolvedValue().Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return float64(v.getResolvedValue().Int())
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return float64(v.getResolvedValue().Uint())
+ case reflect.Float32, reflect.Float64:
+ return v.getResolvedValue().Float()
+ case reflect.String:
+ // Try to convert from string to float64 (base 10)
+ f, err := strconv.ParseFloat(v.getResolvedValue().String(), 64)
+ if err != nil {
+ return 0.0
+ }
+ return f
+ default:
+ logf("Value.Float() not available for type: %s\n", v.getResolvedValue().Kind().String())
+ return 0.0
+ }
+}
+
+// Bool returns the underlying value as bool. If the value is not bool, false
+// will always be returned. If you're looking for a true/false evaluation of the
+// underlying value, have a look at the IsTrue() function.
+func (v *Value) Bool() bool {
+ switch v.getResolvedValue().Kind() {
+ case reflect.Bool:
+ return v.getResolvedValue().Bool()
+ default:
+ logf("Value.Bool() not available for type: %s\n", v.getResolvedValue().Kind().String())
+ return false
+ }
+}
+
+// Time returns the underlying value as time.Time.
+// If the underlying value is not a time.Time, it returns the zero value of time.Time.
+func (v *Value) Time() time.Time {
+ tm, ok := v.Interface().(time.Time)
+ if ok {
+ return tm
+ }
+ return time.Time{}
+}
+
+// IsTrue tries to evaluate the underlying value the Pythonic way:
+//
+// Returns TRUE in one of the following cases:
+//
+// * int != 0
+// * uint != 0
+// * float != 0.0
+// * len(array/chan/map/slice/string) > 0
+// * bool == true
+// * underlying value is a struct
+//
+// Otherwise it always returns FALSE.
+func (v *Value) IsTrue() bool {
+ switch v.getResolvedValue().Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.getResolvedValue().Int() != 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return v.getResolvedValue().Uint() != 0
+ case reflect.Float32, reflect.Float64:
+ return v.getResolvedValue().Float() != 0
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
+ return v.getResolvedValue().Len() > 0
+ case reflect.Bool:
+ return v.getResolvedValue().Bool()
+ case reflect.Struct:
+ return true // struct instance is always true
+ default:
+ logf("Value.IsTrue() not available for type: %s\n", v.getResolvedValue().Kind().String())
+ return false
+ }
+}
+
+// Negate tries to negate the underlying value. It's mainly used for
+// the NOT-operator and in conjunction with a call to
+// return_value.IsTrue() afterwards.
+//
+// Example:
+// AsValue(1).Negate().IsTrue() == false
+func (v *Value) Negate() *Value {
+ switch v.getResolvedValue().Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ if v.Integer() != 0 {
+ return AsValue(0)
+ }
+ return AsValue(1)
+ case reflect.Float32, reflect.Float64:
+ if v.Float() != 0.0 {
+ return AsValue(float64(0.0))
+ }
+ return AsValue(float64(1.0))
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
+ return AsValue(v.getResolvedValue().Len() == 0)
+ case reflect.Bool:
+ return AsValue(!v.getResolvedValue().Bool())
+ case reflect.Struct:
+ return AsValue(false)
+ default:
+ logf("Value.IsTrue() not available for type: %s\n", v.getResolvedValue().Kind().String())
+ return AsValue(true)
+ }
+}
+
+// Len returns the length for an array, chan, map, slice or string.
+// Otherwise it will return 0.
+func (v *Value) Len() int {
+ switch v.getResolvedValue().Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ return v.getResolvedValue().Len()
+ case reflect.String:
+ runes := []rune(v.getResolvedValue().String())
+ return len(runes)
+ default:
+ logf("Value.Len() not available for type: %s\n", v.getResolvedValue().Kind().String())
+ return 0
+ }
+}
+
+// Slice slices an array, slice or string. Otherwise it will
+// return an empty []int.
+func (v *Value) Slice(i, j int) *Value {
+ switch v.getResolvedValue().Kind() {
+ case reflect.Array, reflect.Slice:
+ return AsValue(v.getResolvedValue().Slice(i, j).Interface())
+ case reflect.String:
+ runes := []rune(v.getResolvedValue().String())
+ return AsValue(string(runes[i:j]))
+ default:
+ logf("Value.Slice() not available for type: %s\n", v.getResolvedValue().Kind().String())
+ return AsValue([]int{})
+ }
+}
+
+// Index gets the i-th item of an array, slice or string. Otherwise
+// it will return NIL.
+func (v *Value) Index(i int) *Value {
+ switch v.getResolvedValue().Kind() {
+ case reflect.Array, reflect.Slice:
+ if i >= v.Len() {
+ return AsValue(nil)
+ }
+ return AsValue(v.getResolvedValue().Index(i).Interface())
+ case reflect.String:
+ //return AsValue(v.getResolvedValue().Slice(i, i+1).Interface())
+ s := v.getResolvedValue().String()
+ runes := []rune(s)
+ if i < len(runes) {
+ return AsValue(string(runes[i]))
+ }
+ return AsValue("")
+ default:
+ logf("Value.Slice() not available for type: %s\n", v.getResolvedValue().Kind().String())
+ return AsValue([]int{})
+ }
+}
+
+// Contains checks whether the underlying value (which must be of type struct, map,
+// string, array or slice) contains another Value (e.g. used to check
+// whether a struct contains a specific field or a map contains a specific key).
+//
+// Example:
+// AsValue("Hello, World!").Contains(AsValue("World")) == true
+func (v *Value) Contains(other *Value) bool {
+ switch v.getResolvedValue().Kind() {
+ case reflect.Struct:
+ fieldValue := v.getResolvedValue().FieldByName(other.String())
+ return fieldValue.IsValid()
+ case reflect.Map:
+ var mapValue reflect.Value
+ switch other.Interface().(type) {
+ case int:
+ mapValue = v.getResolvedValue().MapIndex(other.getResolvedValue())
+ case string:
+ mapValue = v.getResolvedValue().MapIndex(other.getResolvedValue())
+ default:
+ logf("Value.Contains() does not support lookup type '%s'\n", other.getResolvedValue().Kind().String())
+ return false
+ }
+
+ return mapValue.IsValid()
+ case reflect.String:
+ return strings.Contains(v.getResolvedValue().String(), other.String())
+
+ case reflect.Slice, reflect.Array:
+ for i := 0; i < v.getResolvedValue().Len(); i++ {
+ item := v.getResolvedValue().Index(i)
+ if other.Interface() == item.Interface() {
+ return true
+ }
+ }
+ return false
+
+ default:
+ logf("Value.Contains() not available for type: %s\n", v.getResolvedValue().Kind().String())
+ return false
+ }
+}
+
+// CanSlice checks whether the underlying value is of type array, slice or string.
+// You would normally use CanSlice() before using the Slice() operation.
+func (v *Value) CanSlice() bool {
+ switch v.getResolvedValue().Kind() {
+ case reflect.Array, reflect.Slice, reflect.String:
+ return true
+ }
+ return false
+}
+
+// Iterate iterates over a map, array, slice or a string. It calls fn
+// (the function's first argument) for every item with the following arguments:
+//
+// idx current 0-index
+// count total number of items
+// key *Value for the key or item
+// value *Value (only for maps, the respective value for a specific key)
+//
+// If the underlying value has no items or is not one of the types above,
+// the empty function (function's second argument) will be called.
+func (v *Value) Iterate(fn func(idx, count int, key, value *Value) bool, empty func()) {
+ v.IterateOrder(fn, empty, false, false)
+}
+
+// IterateOrder behaves like Value.Iterate, but can iterate through an array/slice/string in reverse. Does
+// not affect the iteration through a map because maps don't have any particular order.
+// However, you can force an order using the `sorted` keyword (and even use `reversed sorted`).
+func (v *Value) IterateOrder(fn func(idx, count int, key, value *Value) bool, empty func(), reverse bool, sorted bool) {
+ switch v.getResolvedValue().Kind() {
+ case reflect.Map:
+ keys := sortedKeys(v.getResolvedValue().MapKeys())
+ if sorted {
+ if reverse {
+ sort.Sort(sort.Reverse(keys))
+ } else {
+ sort.Sort(keys)
+ }
+ }
+ keyLen := len(keys)
+ for idx, key := range keys {
+ value := v.getResolvedValue().MapIndex(key)
+ if !fn(idx, keyLen, &Value{val: key}, &Value{val: value}) {
+ return
+ }
+ }
+ if keyLen == 0 {
+ empty()
+ }
+ return // done
+ case reflect.Array, reflect.Slice:
+ var items valuesList
+
+ itemCount := v.getResolvedValue().Len()
+ for i := 0; i < itemCount; i++ {
+ items = append(items, &Value{val: v.getResolvedValue().Index(i)})
+ }
+
+ if sorted {
+ if reverse {
+ sort.Sort(sort.Reverse(items))
+ } else {
+ sort.Sort(items)
+ }
+ } else {
+ if reverse {
+ for i := 0; i < itemCount/2; i++ {
+ items[i], items[itemCount-1-i] = items[itemCount-1-i], items[i]
+ }
+ }
+ }
+
+ if len(items) > 0 {
+ for idx, item := range items {
+ if !fn(idx, itemCount, item, nil) {
+ return
+ }
+ }
+ } else {
+ empty()
+ }
+ return // done
+ case reflect.String:
+ if sorted {
+ // TODO(flosch): Handle sorted
+ panic("TODO: handle sort for type string")
+ }
+
+ // TODO(flosch): Not utf8-compatible (utf8-decoding necessary)
+ charCount := v.getResolvedValue().Len()
+ if charCount > 0 {
+ if reverse {
+ for i := charCount - 1; i >= 0; i-- {
+ if !fn(i, charCount, &Value{val: v.getResolvedValue().Slice(i, i+1)}, nil) {
+ return
+ }
+ }
+ } else {
+ for i := 0; i < charCount; i++ {
+ if !fn(i, charCount, &Value{val: v.getResolvedValue().Slice(i, i+1)}, nil) {
+ return
+ }
+ }
+ }
+ } else {
+ empty()
+ }
+ return // done
+ default:
+ logf("Value.Iterate() not available for type: %s\n", v.getResolvedValue().Kind().String())
+ }
+ empty()
+}
+
+// Interface gives you access to the underlying value.
+func (v *Value) Interface() interface{} {
+ if v.val.IsValid() {
+ return v.val.Interface()
+ }
+ return nil
+}
+
+// EqualValueTo checks whether two values contain the same value or object.
+func (v *Value) EqualValueTo(other *Value) bool {
+ // comparison of uint with int fails using .Interface()-comparison (see issue #64)
+ if v.IsInteger() && other.IsInteger() {
+ return v.Integer() == other.Integer()
+ }
+ if v.IsTime() && other.IsTime() {
+ return v.Time().Equal(other.Time())
+ }
+ return v.Interface() == other.Interface()
+}
+
+type sortedKeys []reflect.Value
+
+func (sk sortedKeys) Len() int {
+ return len(sk)
+}
+
+func (sk sortedKeys) Less(i, j int) bool {
+ vi := &Value{val: sk[i]}
+ vj := &Value{val: sk[j]}
+ switch {
+ case vi.IsInteger() && vj.IsInteger():
+ return vi.Integer() < vj.Integer()
+ case vi.IsFloat() && vj.IsFloat():
+ return vi.Float() < vj.Float()
+ default:
+ return vi.String() < vj.String()
+ }
+}
+
+func (sk sortedKeys) Swap(i, j int) {
+ sk[i], sk[j] = sk[j], sk[i]
+}
+
+type valuesList []*Value
+
+func (vl valuesList) Len() int {
+ return len(vl)
+}
+
+func (vl valuesList) Less(i, j int) bool {
+ vi := vl[i]
+ vj := vl[j]
+ switch {
+ case vi.IsInteger() && vj.IsInteger():
+ return vi.Integer() < vj.Integer()
+ case vi.IsFloat() && vj.IsFloat():
+ return vi.Float() < vj.Float()
+ default:
+ return vi.String() < vj.String()
+ }
+}
+
+func (vl valuesList) Swap(i, j int) {
+ vl[i], vl[j] = vl[j], vl[i]
+}
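For orientation, here is a small sketch (not part of the vendored file) of how the Value helpers above behave for a slice; the sample data is made up.

package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	v := pongo2.AsValue([]string{"a", "b", "c"})

	fmt.Println(v.IsTrue())                      // true: non-empty slice
	fmt.Println(v.Len())                         // 3
	fmt.Println(v.Contains(pongo2.AsValue("b"))) // true: item comparison for slices
	fmt.Println(v.Index(1).String())             // "b"

	// For slices, key carries the item and value is nil; the second callback
	// runs only when there is nothing to iterate.
	v.Iterate(func(idx, count int, key, value *pongo2.Value) bool {
		fmt.Printf("%d/%d: %s\n", idx, count, key.String())
		return true // continue iteration
	}, func() {
		fmt.Println("empty")
	})
}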
diff --git a/vendor/github.com/flosch/pongo2/variable.go b/vendor/github.com/flosch/pongo2/variable.go
new file mode 100644
index 00000000..25e2af40
--- /dev/null
+++ b/vendor/github.com/flosch/pongo2/variable.go
@@ -0,0 +1,693 @@
+package pongo2
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+const (
+ varTypeInt = iota
+ varTypeIdent
+)
+
+var (
+ typeOfValuePtr = reflect.TypeOf(new(Value))
+ typeOfExecCtxPtr = reflect.TypeOf(new(ExecutionContext))
+)
+
+type variablePart struct {
+ typ int
+ s string
+ i int
+
+ isFunctionCall bool
+ callingArgs []functionCallArgument // needed for a function call, represents all argument nodes (INode supports nested function calls)
+}
+
+type functionCallArgument interface {
+ Evaluate(*ExecutionContext) (*Value, *Error)
+}
+
+// TODO: Add location tokens
+type stringResolver struct {
+ locationToken *Token
+ val string
+}
+
+type intResolver struct {
+ locationToken *Token
+ val int
+}
+
+type floatResolver struct {
+ locationToken *Token
+ val float64
+}
+
+type boolResolver struct {
+ locationToken *Token
+ val bool
+}
+
+type variableResolver struct {
+ locationToken *Token
+
+ parts []*variablePart
+}
+
+type nodeFilteredVariable struct {
+ locationToken *Token
+
+ resolver IEvaluator
+ filterChain []*filterCall
+}
+
+type nodeVariable struct {
+ locationToken *Token
+ expr IEvaluator
+}
+
+type executionCtxEval struct{}
+
+func (v *nodeFilteredVariable) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := v.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (vr *variableResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := vr.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (s *stringResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := s.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (i *intResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := i.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (f *floatResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := f.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (b *boolResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := b.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (v *nodeFilteredVariable) GetPositionToken() *Token {
+ return v.locationToken
+}
+
+func (vr *variableResolver) GetPositionToken() *Token {
+ return vr.locationToken
+}
+
+func (s *stringResolver) GetPositionToken() *Token {
+ return s.locationToken
+}
+
+func (i *intResolver) GetPositionToken() *Token {
+ return i.locationToken
+}
+
+func (f *floatResolver) GetPositionToken() *Token {
+ return f.locationToken
+}
+
+func (b *boolResolver) GetPositionToken() *Token {
+ return b.locationToken
+}
+
+func (s *stringResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+ return AsValue(s.val), nil
+}
+
+func (i *intResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+ return AsValue(i.val), nil
+}
+
+func (f *floatResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+ return AsValue(f.val), nil
+}
+
+func (b *boolResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+ return AsValue(b.val), nil
+}
+
+func (s *stringResolver) FilterApplied(name string) bool {
+ return false
+}
+
+func (i *intResolver) FilterApplied(name string) bool {
+ return false
+}
+
+func (f *floatResolver) FilterApplied(name string) bool {
+ return false
+}
+
+func (b *boolResolver) FilterApplied(name string) bool {
+ return false
+}
+
+func (nv *nodeVariable) FilterApplied(name string) bool {
+ return nv.expr.FilterApplied(name)
+}
+
+func (nv *nodeVariable) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
+ value, err := nv.expr.Evaluate(ctx)
+ if err != nil {
+ return err
+ }
+
+ if !nv.expr.FilterApplied("safe") && !value.safe && value.IsString() && ctx.Autoescape {
+ // apply escape filter
+ value, err = filters["escape"](value, nil)
+ if err != nil {
+ return err
+ }
+ }
+
+ writer.WriteString(value.String())
+ return nil
+}
+
+func (executionCtxEval) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+ return AsValue(ctx), nil
+}
+
+func (vr *variableResolver) FilterApplied(name string) bool {
+ return false
+}
+
+func (vr *variableResolver) String() string {
+ parts := make([]string, 0, len(vr.parts))
+ for _, p := range vr.parts {
+ switch p.typ {
+ case varTypeInt:
+ parts = append(parts, strconv.Itoa(p.i))
+ case varTypeIdent:
+ parts = append(parts, p.s)
+ default:
+ panic("unimplemented")
+ }
+ }
+ return strings.Join(parts, ".")
+}
+
+func (vr *variableResolver) resolve(ctx *ExecutionContext) (*Value, error) {
+ var current reflect.Value
+ var isSafe bool
+
+ for idx, part := range vr.parts {
+ if idx == 0 {
+ // We're looking up the first part of the variable.
+ // First we have a look in our private
+ // context (e.g. information provided by tags, like the forloop)
+ val, inPrivate := ctx.Private[vr.parts[0].s]
+ if !inPrivate {
+ // Nothing found? Then have a final lookup in the public context
+ val = ctx.Public[vr.parts[0].s]
+ }
+ current = reflect.ValueOf(val) // Get the initial value
+ } else {
+ // Next parts, resolve it from current
+
+ // Before resolving the pointer, let's see if we have a method to call.
+ // The problem with resolving the pointer is that we're changing the receiver.
+ isFunc := false
+ if part.typ == varTypeIdent {
+ funcValue := current.MethodByName(part.s)
+ if funcValue.IsValid() {
+ current = funcValue
+ isFunc = true
+ }
+ }
+
+ if !isFunc {
+ // If current is a pointer, resolve it
+ if current.Kind() == reflect.Ptr {
+ current = current.Elem()
+ if !current.IsValid() {
+ // Value is not valid (anymore)
+ return AsValue(nil), nil
+ }
+ }
+
+ // Look up which part must be called now
+ switch part.typ {
+ case varTypeInt:
+ // Calling an index is only possible for:
+ // * slices/arrays/strings
+ switch current.Kind() {
+ case reflect.String, reflect.Array, reflect.Slice:
+ if part.i >= 0 && current.Len() > part.i {
+ current = current.Index(part.i)
+ } else {
+ // In Django, exceeding the length of a list is just empty.
+ return AsValue(nil), nil
+ }
+ default:
+ return nil, fmt.Errorf("Can't access an index on type %s (variable %s)",
+ current.Kind().String(), vr.String())
+ }
+ case varTypeIdent:
+ // debugging:
+ // fmt.Printf("now = %s (kind: %s)\n", part.s, current.Kind().String())
+
+ // Calling a field or key
+ switch current.Kind() {
+ case reflect.Struct:
+ current = current.FieldByName(part.s)
+ case reflect.Map:
+ current = current.MapIndex(reflect.ValueOf(part.s))
+ default:
+ return nil, fmt.Errorf("Can't access a field by name on type %s (variable %s)",
+ current.Kind().String(), vr.String())
+ }
+ default:
+ panic("unimplemented")
+ }
+ }
+ }
+
+ if !current.IsValid() {
+ // Value is not valid (anymore)
+ return AsValue(nil), nil
+ }
+
+ // If current is a reflect.ValueOf(pongo2.Value), then unpack it.
+ // This happens in function calls (as a return value) or by injecting
+ // into the execution context (e.g. in a for-loop).
+ if current.Type() == typeOfValuePtr {
+ tmpValue := current.Interface().(*Value)
+ current = tmpValue.val
+ isSafe = tmpValue.safe
+ }
+
+ // Check whether this is an interface and resolve it where required
+ if current.Kind() == reflect.Interface {
+ current = reflect.ValueOf(current.Interface())
+ }
+
+ // Check if the part is a function call
+ if part.isFunctionCall || current.Kind() == reflect.Func {
+ // Check for callable
+ if current.Kind() != reflect.Func {
+ return nil, fmt.Errorf("'%s' is not a function (it is %s)", vr.String(), current.Kind().String())
+ }
+
+ // Check for correct function syntax and types
+ // func(*Value, ...) *Value
+ t := current.Type()
+ currArgs := part.callingArgs
+
+ // If an implicit ExecCtx is needed
+ if t.NumIn() > 0 && t.In(0) == typeOfExecCtxPtr {
+ currArgs = append([]functionCallArgument{executionCtxEval{}}, currArgs...)
+ }
+
+ // Input arguments
+ if len(currArgs) != t.NumIn() && !(len(currArgs) >= t.NumIn()-1 && t.IsVariadic()) {
+ return nil,
+ fmt.Errorf("Function input argument count (%d) of '%s' must be equal to the calling argument count (%d).",
+ t.NumIn(), vr.String(), len(currArgs))
+ }
+
+ // Output arguments
+ if t.NumOut() != 1 && t.NumOut() != 2 {
+ return nil, fmt.Errorf("'%s' must have exactly 1 or 2 output arguments, the second argument must be of type error", vr.String())
+ }
+
+ // Evaluate all parameters
+ var parameters []reflect.Value
+
+ numArgs := t.NumIn()
+ isVariadic := t.IsVariadic()
+ var fnArg reflect.Type
+
+ for idx, arg := range currArgs {
+ pv, err := arg.Evaluate(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ if isVariadic {
+ if idx >= t.NumIn()-1 {
+ fnArg = t.In(numArgs - 1).Elem()
+ } else {
+ fnArg = t.In(idx)
+ }
+ } else {
+ fnArg = t.In(idx)
+ }
+
+ if fnArg != typeOfValuePtr {
+ // The function's argument is not a *pongo2.Value, so we have to check whether the input argument has the same type as the function's argument
+ if !isVariadic {
+ if fnArg != reflect.TypeOf(pv.Interface()) && fnArg.Kind() != reflect.Interface {
+ return nil, fmt.Errorf("Function input argument %d of '%s' must be of type %s or *pongo2.Value (not %T).",
+ idx, vr.String(), fnArg.String(), pv.Interface())
+ }
+ // Function's argument has another type, using the interface-value
+ parameters = append(parameters, reflect.ValueOf(pv.Interface()))
+ } else {
+ if fnArg != reflect.TypeOf(pv.Interface()) && fnArg.Kind() != reflect.Interface {
+ return nil, fmt.Errorf("Function variadic input argument of '%s' must be of type %s or *pongo2.Value (not %T).",
+ vr.String(), fnArg.String(), pv.Interface())
+ }
+ // Function's argument has another type, using the interface-value
+ parameters = append(parameters, reflect.ValueOf(pv.Interface()))
+ }
+ } else {
+ // Function's argument is a *pongo2.Value
+ parameters = append(parameters, reflect.ValueOf(pv))
+ }
+ }
+
+ // Check if any of the values are invalid
+ for _, p := range parameters {
+ if p.Kind() == reflect.Invalid {
+ return nil, fmt.Errorf("Calling a function using an invalid parameter")
+ }
+ }
+
+ // Call it and get first return parameter back
+ values := current.Call(parameters)
+ rv := values[0]
+ if t.NumOut() == 2 {
+ e := values[1].Interface()
+ if e != nil {
+ err, ok := e.(error)
+ if !ok {
+ return nil, fmt.Errorf("The second return value is not an error")
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ if rv.Type() != typeOfValuePtr {
+ current = reflect.ValueOf(rv.Interface())
+ } else {
+ // Return the function call value
+ current = rv.Interface().(*Value).val
+ isSafe = rv.Interface().(*Value).safe
+ }
+ }
+
+ if !current.IsValid() {
+ // Value is not valid (e. g. NIL value)
+ return AsValue(nil), nil
+ }
+ }
+
+ return &Value{val: current, safe: isSafe}, nil
+}
+
+func (vr *variableResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+ value, err := vr.resolve(ctx)
+ if err != nil {
+ return AsValue(nil), ctx.Error(err.Error(), vr.locationToken)
+ }
+ return value, nil
+}
+
+func (v *nodeFilteredVariable) FilterApplied(name string) bool {
+ for _, filter := range v.filterChain {
+ if filter.name == name {
+ return true
+ }
+ }
+ return false
+}
+
+func (v *nodeFilteredVariable) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
+ value, err := v.resolver.Evaluate(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, filter := range v.filterChain {
+ value, err = filter.Execute(value, ctx)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return value, nil
+}
+
+// IDENT | IDENT.(IDENT|NUMBER)...
+func (p *Parser) parseVariableOrLiteral() (IEvaluator, *Error) {
+ t := p.Current()
+
+ if t == nil {
+ return nil, p.Error("Unexpected EOF, expected a number, string, keyword or identifier.", p.lastToken)
+ }
+
+ // If the first part is a number or a string, there's nothing to resolve (we only need to return the value)
+ switch t.Typ {
+ case TokenNumber:
+ p.Consume()
+
+ // One exception to the rule that we don't have float64 literals is at the beginning
+ // of an expression (or a variable name). Since we know we started with an integer,
+ // which obviously can't be a variable name, we can check whether the first number
+ // is followed by a dot (and then a number again). If so, we convert it to a float64.
+
+ if p.Match(TokenSymbol, ".") != nil {
+ // float64
+ t2 := p.MatchType(TokenNumber)
+ if t2 == nil {
+ return nil, p.Error("Expected a number after the '.'.", nil)
+ }
+ f, err := strconv.ParseFloat(fmt.Sprintf("%s.%s", t.Val, t2.Val), 64)
+ if err != nil {
+ return nil, p.Error(err.Error(), t)
+ }
+ fr := &floatResolver{
+ locationToken: t,
+ val: f,
+ }
+ return fr, nil
+ }
+ i, err := strconv.Atoi(t.Val)
+ if err != nil {
+ return nil, p.Error(err.Error(), t)
+ }
+ nr := &intResolver{
+ locationToken: t,
+ val: i,
+ }
+ return nr, nil
+
+ case TokenString:
+ p.Consume()
+ sr := &stringResolver{
+ locationToken: t,
+ val: t.Val,
+ }
+ return sr, nil
+ case TokenKeyword:
+ p.Consume()
+ switch t.Val {
+ case "true":
+ br := &boolResolver{
+ locationToken: t,
+ val: true,
+ }
+ return br, nil
+ case "false":
+ br := &boolResolver{
+ locationToken: t,
+ val: false,
+ }
+ return br, nil
+ default:
+ return nil, p.Error("This keyword is not allowed here.", nil)
+ }
+ }
+
+ resolver := &variableResolver{
+ locationToken: t,
+ }
+
+ // First part of a variable MUST be an identifier
+ if t.Typ != TokenIdentifier {
+ return nil, p.Error("Expected either a number, string, keyword or identifier.", t)
+ }
+
+ resolver.parts = append(resolver.parts, &variablePart{
+ typ: varTypeIdent,
+ s: t.Val,
+ })
+
+ p.Consume() // we consumed the first identifier of the variable name
+
+variableLoop:
+ for p.Remaining() > 0 {
+ t = p.Current()
+
+ if p.Match(TokenSymbol, ".") != nil {
+ // Next variable part (can be either NUMBER or IDENT)
+ t2 := p.Current()
+ if t2 != nil {
+ switch t2.Typ {
+ case TokenIdentifier:
+ resolver.parts = append(resolver.parts, &variablePart{
+ typ: varTypeIdent,
+ s: t2.Val,
+ })
+ p.Consume() // consume: IDENT
+ continue variableLoop
+ case TokenNumber:
+ i, err := strconv.Atoi(t2.Val)
+ if err != nil {
+ return nil, p.Error(err.Error(), t2)
+ }
+ resolver.parts = append(resolver.parts, &variablePart{
+ typ: varTypeInt,
+ i: i,
+ })
+ p.Consume() // consume: NUMBER
+ continue variableLoop
+ default:
+ return nil, p.Error("This token is not allowed within a variable name.", t2)
+ }
+ } else {
+ // EOF
+ return nil, p.Error("Unexpected EOF, expected either IDENTIFIER or NUMBER after DOT.",
+ p.lastToken)
+ }
+ } else if p.Match(TokenSymbol, "(") != nil {
+ // Function call
+ // FunctionName '(' Comma-separated list of expressions ')'
+ part := resolver.parts[len(resolver.parts)-1]
+ part.isFunctionCall = true
+ argumentLoop:
+ for {
+ if p.Remaining() == 0 {
+ return nil, p.Error("Unexpected EOF, expected function call argument list.", p.lastToken)
+ }
+
+ if p.Peek(TokenSymbol, ")") == nil {
+ // No closing bracket, so we're parsing an expression
+ exprArg, err := p.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ part.callingArgs = append(part.callingArgs, exprArg)
+
+ if p.Match(TokenSymbol, ")") != nil {
+ // If there's a closing bracket after an expression, we will stop parsing the arguments
+ break argumentLoop
+ } else {
+ // If there's NO closing bracket, there MUST be a comma
+ if p.Match(TokenSymbol, ",") == nil {
+ return nil, p.Error("Missing comma or closing bracket after argument.", nil)
+ }
+ }
+ } else {
+ // We got a closing bracket, so stop parsing arguments
+ p.Consume()
+ break argumentLoop
+ }
+
+ }
+ // We're done parsing the function call, next variable part
+ continue variableLoop
+ }
+
+ // No dot or function call? Then we're done with the variable parsing
+ break
+ }
+
+ return resolver, nil
+}
+
+func (p *Parser) parseVariableOrLiteralWithFilter() (*nodeFilteredVariable, *Error) {
+ v := &nodeFilteredVariable{
+ locationToken: p.Current(),
+ }
+
+ // Parse the variable name
+ resolver, err := p.parseVariableOrLiteral()
+ if err != nil {
+ return nil, err
+ }
+ v.resolver = resolver
+
+ // Parse all the filters
+filterLoop:
+ for p.Match(TokenSymbol, "|") != nil {
+ // Parse one single filter
+ filter, err := p.parseFilter()
+ if err != nil {
+ return nil, err
+ }
+
+ // Check sandbox filter restriction
+ if _, isBanned := p.template.set.bannedFilters[filter.name]; isBanned {
+ return nil, p.Error(fmt.Sprintf("Usage of filter '%s' is not allowed (sandbox restriction active).", filter.name), nil)
+ }
+
+ v.filterChain = append(v.filterChain, filter)
+
+ continue filterLoop
+ }
+
+ return v, nil
+}
+
+func (p *Parser) parseVariableElement() (INode, *Error) {
+ node := &nodeVariable{
+ locationToken: p.Current(),
+ }
+
+ p.Consume() // consume '{{'
+
+ expr, err := p.ParseExpression()
+ if err != nil {
+ return nil, err
+ }
+ node.expr = expr
+
+ if p.Match(TokenSymbol, "}}") == nil {
+ return nil, p.Error("'}}' expected", nil)
+ }
+
+ return node, nil
+}
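To illustrate the resolver above (field, method, and index lookups), a short sketch follows; the user type, its Shout method, and the template string are hypothetical examples, not part of the vendored code.

package main

import (
	"fmt"
	"strings"

	"github.com/flosch/pongo2"
)

type user struct {
	Name string
}

// Shout is resolved as a method call by the variable resolver.
func (u user) Shout() string {
	return strings.ToUpper(u.Name)
}

func main() {
	// {{ user.Name }} resolves a struct field, {{ user.Shout }} a method,
	// and {{ tags.0 }} an index into a slice.
	out, err := pongo2.RenderTemplateString(
		"{{ user.Name }} says {{ user.Shout }} ({{ tags.0 }})",
		pongo2.Context{
			"user": user{Name: "gopher"},
			"tags": []string{"hello"},
		},
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // gopher says GOPHER (hello)
}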
diff --git a/vendor/github.com/go-macaroon-bakery/macaroonpb/LICENSE b/vendor/github.com/go-macaroon-bakery/macaroonpb/LICENSE
new file mode 100644
index 00000000..67c4fb56
--- /dev/null
+++ b/vendor/github.com/go-macaroon-bakery/macaroonpb/LICENSE
@@ -0,0 +1,187 @@
+Copyright © 2014, Roger Peppe, Canonical Inc.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply.
+
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+ This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+ 0. Additional Definitions.
+
+ As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+ "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+ An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+ A "Combined Work" is a work produced by combining or linking an
+Application with the Library. The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+ The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+ The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+ 1. Exception to Section 3 of the GNU GPL.
+
+ You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+ 2. Conveying Modified Versions.
+
+ If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+ a) under this License, provided that you make a good faith effort to
+ ensure that, in the event an Application does not supply the
+ function or data, the facility still operates, and performs
+ whatever part of its purpose remains meaningful, or
+
+ b) under the GNU GPL, with none of the additional permissions of
+ this License applicable to that copy.
+
+ 3. Object Code Incorporating Material from Library Header Files.
+
+ The object code form of an Application may incorporate material from
+a header file that is part of the Library. You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+ a) Give prominent notice with each copy of the object code that the
+ Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the object code with a copy of the GNU GPL and this license
+ document.
+
+ 4. Combined Works.
+
+ You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+ a) Give prominent notice with each copy of the Combined Work that
+ the Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the Combined Work with a copy of the GNU GPL and this license
+ document.
+
+ c) For a Combined Work that displays copyright notices during
+ execution, include the copyright notice for the Library among
+ these notices, as well as a reference directing the user to the
+ copies of the GNU GPL and this license document.
+
+ d) Do one of the following:
+
+ 0) Convey the Minimal Corresponding Source under the terms of this
+ License, and the Corresponding Application Code in a form
+ suitable for, and under terms that permit, the user to
+ recombine or relink the Application with a modified version of
+ the Linked Version to produce a modified Combined Work, in the
+ manner specified by section 6 of the GNU GPL for conveying
+ Corresponding Source.
+
+ 1) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (a) uses at run time
+ a copy of the Library already present on the user's computer
+ system, and (b) will operate properly with a modified version
+ of the Library that is interface-compatible with the Linked
+ Version.
+
+ e) Provide Installation Information, but only if you would otherwise
+ be required to provide such information under section 6 of the
+ GNU GPL, and only to the extent that such information is
+ necessary to install and execute a modified version of the
+ Combined Work produced by recombining or relinking the
+ Application with a modified version of the Linked Version. (If
+ you use option 4d0, the Installation Information must accompany
+ the Minimal Corresponding Source and Corresponding Application
+ Code. If you use option 4d1, you must provide the Installation
+ Information in the manner specified by section 6 of the GNU GPL
+ for conveying Corresponding Source.)
+
+ 5. Combined Libraries.
+
+ You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+ a) Accompany the combined library with a copy of the same work based
+ on the Library, uncombined with any other library facilities,
+ conveyed under the terms of this License.
+
+ b) Give prominent notice with the combined library that part of it
+ is a work based on the Library, and explaining where to find the
+ accompanying uncombined form of the same work.
+
+ 6. Revised Versions of the GNU Lesser General Public License.
+
+ The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+ If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/vendor/github.com/go-macaroon-bakery/macaroonpb/README.md b/vendor/github.com/go-macaroon-bakery/macaroonpb/README.md
new file mode 100644
index 00000000..4d03b8a8
--- /dev/null
+++ b/vendor/github.com/go-macaroon-bakery/macaroonpb/README.md
@@ -0,0 +1,13 @@
+# Macaroon ID Protocol Buffers
+
+This module defines the serialization format of macaroon identifiers for
+macaroons created by the macaroon-bakery. For the most part this encoding
+is considered an internal implementation detail of the macaroon-bakery
+and external applications should not rely on any of the details of this
+encoding being maintained between different bakery versions.
+
+This is broken out into a separate module as the protobuf implementation
+works in such a way that one cannot have multiple definitions of a
+message in any particular application's dependency tree. This module
+therefore provides a common definition for use by multiple versions of
+the macaroon-bakery to facilitate easier migration in client applications.
diff --git a/vendor/github.com/go-macaroon-bakery/macaroonpb/id.go b/vendor/github.com/go-macaroon-bakery/macaroonpb/id.go
new file mode 100644
index 00000000..f7ddc18b
--- /dev/null
+++ b/vendor/github.com/go-macaroon-bakery/macaroonpb/id.go
@@ -0,0 +1,19 @@
+// Package macaroonpb defines the serialization details of macaroon ids
+// used in the macaroon-bakery.
+package macaroonpb
+
+import (
+ "github.com/golang/protobuf/proto"
+)
+
+//go:generate protoc --go_out . id.proto
+
+// MarshalBinary implements encoding.BinaryMarshaler.
+func (id *MacaroonId) MarshalBinary() ([]byte, error) {
+ return proto.Marshal(id)
+}
+
+// UnmarshalBinary implements encoding.BinaryUnmarshaler.
+func (id *MacaroonId) UnmarshalBinary(data []byte) error {
+ return proto.Unmarshal(data, id)
+}
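A minimal round-trip sketch for the generated MacaroonId/Op types and the helpers above (not part of the vendored file); the nonce, storage id, and operation values are invented.

package main

import (
	"fmt"

	"github.com/go-macaroon-bakery/macaroonpb"
)

func main() {
	id := &macaroonpb.MacaroonId{
		Nonce:     []byte("example-nonce"),
		StorageId: []byte("example-storage-id"),
		Ops: []*macaroonpb.Op{{
			Entity:  "bucket",
			Actions: []string{"read", "write"},
		}},
	}

	// Round-trip through the protobuf wire format.
	data, err := id.MarshalBinary()
	if err != nil {
		panic(err)
	}

	var decoded macaroonpb.MacaroonId
	if err := decoded.UnmarshalBinary(data); err != nil {
		panic(err)
	}
	fmt.Println(decoded.GetOps()[0].GetEntity(), decoded.GetOps()[0].GetActions())
}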
diff --git a/vendor/github.com/go-macaroon-bakery/macaroonpb/id.pb.go b/vendor/github.com/go-macaroon-bakery/macaroonpb/id.pb.go
new file mode 100644
index 00000000..41b69d9d
--- /dev/null
+++ b/vendor/github.com/go-macaroon-bakery/macaroonpb/id.pb.go
@@ -0,0 +1,238 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.23.0
+// protoc v3.12.3
+// source: id.proto
+
+package macaroonpb
+
+import (
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+type MacaroonId struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Nonce []byte `protobuf:"bytes,1,opt,name=nonce,proto3" json:"nonce,omitempty"`
+ StorageId []byte `protobuf:"bytes,2,opt,name=storageId,proto3" json:"storageId,omitempty"`
+ Ops []*Op `protobuf:"bytes,3,rep,name=ops,proto3" json:"ops,omitempty"`
+}
+
+func (x *MacaroonId) Reset() {
+ *x = MacaroonId{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_id_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MacaroonId) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MacaroonId) ProtoMessage() {}
+
+func (x *MacaroonId) ProtoReflect() protoreflect.Message {
+ mi := &file_id_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MacaroonId.ProtoReflect.Descriptor instead.
+func (*MacaroonId) Descriptor() ([]byte, []int) {
+ return file_id_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *MacaroonId) GetNonce() []byte {
+ if x != nil {
+ return x.Nonce
+ }
+ return nil
+}
+
+func (x *MacaroonId) GetStorageId() []byte {
+ if x != nil {
+ return x.StorageId
+ }
+ return nil
+}
+
+func (x *MacaroonId) GetOps() []*Op {
+ if x != nil {
+ return x.Ops
+ }
+ return nil
+}
+
+type Op struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Entity string `protobuf:"bytes,1,opt,name=entity,proto3" json:"entity,omitempty"`
+ Actions []string `protobuf:"bytes,2,rep,name=actions,proto3" json:"actions,omitempty"`
+}
+
+func (x *Op) Reset() {
+ *x = Op{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_id_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Op) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Op) ProtoMessage() {}
+
+func (x *Op) ProtoReflect() protoreflect.Message {
+ mi := &file_id_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Op.ProtoReflect.Descriptor instead.
+func (*Op) Descriptor() ([]byte, []int) {
+ return file_id_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Op) GetEntity() string {
+ if x != nil {
+ return x.Entity
+ }
+ return ""
+}
+
+func (x *Op) GetActions() []string {
+ if x != nil {
+ return x.Actions
+ }
+ return nil
+}
+
+var File_id_proto protoreflect.FileDescriptor
+
+var file_id_proto_rawDesc = []byte{
+ 0x0a, 0x08, 0x69, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x57, 0x0a, 0x0a, 0x4d, 0x61,
+ 0x63, 0x61, 0x72, 0x6f, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x6e, 0x63,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x1c,
+ 0x0a, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x49, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0c, 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x49, 0x64, 0x12, 0x15, 0x0a, 0x03,
+ 0x6f, 0x70, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x03, 0x2e, 0x4f, 0x70, 0x52, 0x03,
+ 0x6f, 0x70, 0x73, 0x22, 0x36, 0x0a, 0x02, 0x4f, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74,
+ 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74,
+ 0x79, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x0e, 0x5a, 0x0c, 0x2e,
+ 0x3b, 0x6d, 0x61, 0x63, 0x61, 0x72, 0x6f, 0x6f, 0x6e, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_id_proto_rawDescOnce sync.Once
+ file_id_proto_rawDescData = file_id_proto_rawDesc
+)
+
+func file_id_proto_rawDescGZIP() []byte {
+ file_id_proto_rawDescOnce.Do(func() {
+ file_id_proto_rawDescData = protoimpl.X.CompressGZIP(file_id_proto_rawDescData)
+ })
+ return file_id_proto_rawDescData
+}
+
+var file_id_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_id_proto_goTypes = []interface{}{
+ (*MacaroonId)(nil), // 0: MacaroonId
+ (*Op)(nil), // 1: Op
+}
+var file_id_proto_depIdxs = []int32{
+ 1, // 0: MacaroonId.ops:type_name -> Op
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_id_proto_init() }
+func file_id_proto_init() {
+ if File_id_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_id_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MacaroonId); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_id_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Op); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_id_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_id_proto_goTypes,
+ DependencyIndexes: file_id_proto_depIdxs,
+ MessageInfos: file_id_proto_msgTypes,
+ }.Build()
+ File_id_proto = out.File
+ file_id_proto_rawDesc = nil
+ file_id_proto_goTypes = nil
+ file_id_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/go-macaroon-bakery/macaroonpb/id.proto b/vendor/github.com/go-macaroon-bakery/macaroonpb/id.proto
new file mode 100644
index 00000000..bfe891ee
--- /dev/null
+++ b/vendor/github.com/go-macaroon-bakery/macaroonpb/id.proto
@@ -0,0 +1,14 @@
+syntax="proto3";
+
+option go_package = ".;macaroonpb";
+
+message MacaroonId {
+ bytes nonce = 1;
+ bytes storageId = 2;
+ repeated Op ops = 3;
+}
+
+message Op {
+ string entity = 1;
+ repeated string actions = 2;
+}
diff --git a/vendor/github.com/go-resty/resty/v2/.gitignore b/vendor/github.com/go-resty/resty/v2/.gitignore
new file mode 100644
index 00000000..9e856bd4
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/.gitignore
@@ -0,0 +1,30 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+
+coverage.out
+coverage.txt
+
+# Exclude intellij IDE folders
+.idea/*
diff --git a/vendor/github.com/go-resty/resty/v2/BUILD.bazel b/vendor/github.com/go-resty/resty/v2/BUILD.bazel
new file mode 100644
index 00000000..03bb44c3
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/BUILD.bazel
@@ -0,0 +1,48 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+load("@bazel_gazelle//:def.bzl", "gazelle")
+
+# gazelle:prefix github.com/go-resty/resty/v2
+# gazelle:go_naming_convention import_alias
+gazelle(name = "gazelle")
+
+go_library(
+ name = "resty",
+ srcs = [
+ "client.go",
+ "middleware.go",
+ "redirect.go",
+ "request.go",
+ "response.go",
+ "resty.go",
+ "retry.go",
+ "trace.go",
+ "transport.go",
+ "transport112.go",
+ "util.go",
+ ],
+ importpath = "github.com/go-resty/resty/v2",
+ visibility = ["//visibility:public"],
+ deps = ["@org_golang_x_net//publicsuffix:go_default_library"],
+)
+
+go_test(
+ name = "resty_test",
+ srcs = [
+ "client_test.go",
+ "context_test.go",
+ "example_test.go",
+ "request_test.go",
+ "resty_test.go",
+ "retry_test.go",
+ "util_test.go",
+ ],
+ data = glob([".testdata/*"]),
+ embed = [":resty"],
+ deps = ["@org_golang_x_net//proxy:go_default_library"],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":resty",
+ visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/go-resty/resty/v2/LICENSE b/vendor/github.com/go-resty/resty/v2/LICENSE
new file mode 100644
index 00000000..27326a65
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015-2021 Jeevanandam M., https://myjeeva.com
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/go-resty/resty/v2/README.md b/vendor/github.com/go-resty/resty/v2/README.md
new file mode 100644
index 00000000..8ec65182
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/README.md
@@ -0,0 +1,906 @@
+# Resty
+
+Simple HTTP and REST client library for Go (inspired by Ruby rest-client)
+
+Features section describes in detail about Resty capabilities
+
+[badges and logo images]
+
+[image: Resty Communication Channels]
+
+## News
+
+ * v2.7.0 [released](https://github.com/go-resty/resty/releases/tag/v2.7.0) and tagged on Nov 03, 2021.
+ * v2.0.0 [released](https://github.com/go-resty/resty/releases/tag/v2.0.0) and tagged on Jul 16, 2019.
+ * v1.12.0 [released](https://github.com/go-resty/resty/releases/tag/v1.12.0) and tagged on Feb 27, 2019.
+ * v1.0 released and tagged on Sep 25, 2017. Resty's first version was released on Sep 15, 2015, and it has gradually grown into a very handy and helpful library. It has been two years since the first release. I'm very thankful to Resty users and its [contributors](https://github.com/go-resty/resty/graphs/contributors).
+
+## Features
+
+ * GET, POST, PUT, DELETE, HEAD, PATCH, OPTIONS, etc.
+ * Simple and chainable methods for settings and request
+ * [Request](https://pkg.go.dev/github.com/go-resty/resty/v2#Request) Body can be `string`, `[]byte`, `struct`, `map`, `slice` and `io.Reader` too
+ * Auto detects `Content-Type`
+ * Buffer less processing for `io.Reader`
+ * Native `*http.Request` instance may be accessed during middleware and request execution via `Request.RawRequest`
+ * Request Body can be read multiple times via `Request.RawRequest.GetBody()`
+ * [Response](https://pkg.go.dev/github.com/go-resty/resty/v2#Response) object gives you more possibility
+ * Access as `[]byte` array - `response.Body()` OR Access as `string` - `response.String()`
+ * Know your `response.Time()` and when the response was received via `response.ReceivedAt()`
+ * Automatic marshal and unmarshal for `JSON` and `XML` content type
+ * Default is `JSON`, if you supply `struct/map` without header `Content-Type`
+ * For auto-unmarshal, refer to -
+ - Success scenario [Request.SetResult()](https://pkg.go.dev/github.com/go-resty/resty/v2#Request.SetResult) and [Response.Result()](https://pkg.go.dev/github.com/go-resty/resty/v2#Response.Result).
+ - Error scenario [Request.SetError()](https://pkg.go.dev/github.com/go-resty/resty/v2#Request.SetError) and [Response.Error()](https://pkg.go.dev/github.com/go-resty/resty/v2#Response.Error).
+ - Supports [RFC7807](https://tools.ietf.org/html/rfc7807) - `application/problem+json` & `application/problem+xml`
+ * Resty provides an option to override [JSON Marshal/Unmarshal and XML Marshal/Unmarshal](#override-json--xml-marshalunmarshal)
+ * Easy to upload one or more file(s) via `multipart/form-data`
+ * Auto detects file content type
+ * Request URL [Path Params (aka URI Params)](https://pkg.go.dev/github.com/go-resty/resty/v2#Request.SetPathParams)
+ * Backoff Retry Mechanism with retry condition function [reference](retry_test.go)
+ * Resty client HTTP & REST [Request](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.OnBeforeRequest) and [Response](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.OnAfterResponse) middlewares
+ * `Request.SetContext` supported
+ * Authorization option of `BasicAuth` and `Bearer` token
+ * Set request `ContentLength` value for all request or particular request
+ * Custom [Root Certificates](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.SetRootCertificate) and Client [Certificates](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.SetCertificates)
+ * Download/Save HTTP response directly into File, like `curl -o` flag. See [SetOutputDirectory](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.SetOutputDirectory) & [SetOutput](https://pkg.go.dev/github.com/go-resty/resty/v2#Request.SetOutput).
+ * Cookies for your request and CookieJar support
+ * SRV Record based request instead of Host URL
+ * Client settings like `Timeout`, `RedirectPolicy`, `Proxy`, `TLSClientConfig`, `Transport`, etc.
+ * Optionally allows GET request with payload, see [SetAllowGetMethodPayload](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.SetAllowGetMethodPayload)
+ * Supports registering external JSON library into resty, see [how to use](https://github.com/go-resty/resty/issues/76#issuecomment-314015250)
+ * Exposes Response reader without reading response (no auto-unmarshaling) if need be, see [how to use](https://github.com/go-resty/resty/issues/87#issuecomment-322100604)
+ * Option to specify expected `Content-Type` when response `Content-Type` header missing. Refer to [#92](https://github.com/go-resty/resty/issues/92)
+ * Resty design
+ * Have client level settings & options and also override at Request level if you want to
+ * Request and Response middleware
+ * Create Multiple clients if you want to `resty.New()`
+ * Supports `http.RoundTripper` implementation, see [SetTransport](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.SetTransport)
+ * goroutine concurrent safe
+ * Resty Client trace, see [Client.EnableTrace](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.EnableTrace) and [Request.EnableTrace](https://pkg.go.dev/github.com/go-resty/resty/v2#Request.EnableTrace)
+ * Since v2.4.0, trace info contains a `RequestAttempt` value, and the `Request` object contains an `Attempt` attribute
+ * Debug mode - clean and informative logging presentation
+ * Gzip - Go handles it automatically, and resty has fallback handling too
+ * Works fine with `HTTP/2` and `HTTP/1.1`
+ * [Bazel support](#bazel-support)
+ * Easily mock Resty for testing, [for e.g.](#mocking-http-requests-using-httpmock-library)
+ * Well tested client library
+
+### Included Batteries
+
+ * Redirect Policies - see [how to use](#redirect-policy)
+ * NoRedirectPolicy
+ * FlexibleRedirectPolicy
+ * DomainCheckRedirectPolicy
+ * etc. [more info](redirect.go)
+ * Retry Mechanism [how to use](#retries)
+ * Backoff Retry
+ * Conditional Retry
+ * Since v2.6.0, Retry Hooks - [Client](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.AddRetryHook), [Request](https://pkg.go.dev/github.com/go-resty/resty/v2#Request.AddRetryHook)
+ * SRV Record based request instead of Host URL [how to use](resty_test.go#L1412)
+ * etc (upcoming - throw your ideas [here](https://github.com/go-resty/resty/issues)).
+
+
+#### Supported Go Versions
+
+Resty first added `go modules` support in the `v1.10.0` release.
+
+Starting with Resty v2, it fully embraces [go modules](https://github.com/golang/go/wiki/Modules) for package releases. It requires a Go version capable of understanding `/vN` suffixed imports:
+
+- 1.9.7+
+- 1.10.3+
+- 1.11+
+
+
+## It might be beneficial for your project :smile:
+
+The Resty author has also published the following projects for the Go community.
+
+ * [aah framework](https://aahframework.org) - A secure, flexible, rapid Go web framework.
+ * [THUMBAI](https://thumbai.app) - Go Mod Repository, Go Vanity Service and Simple Proxy Server.
+ * [go-model](https://github.com/jeevatkm/go-model) - Robust & Easy to use model mapper and utility methods for Go `struct`.
+
+
+## Installation
+
+```bash
+# Go Modules
+require github.com/go-resty/resty/v2 v2.7.0
+```
+
+## Usage
+
+The following samples will help you become comfortable with the resty library.
+
+```go
+// Import resty into your code and refer it as `resty`.
+import "github.com/go-resty/resty/v2"
+```
+
+#### Simple GET
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+resp, err := client.R().
+ EnableTrace().
+ Get("https://httpbin.org/get")
+
+// Explore response object
+fmt.Println("Response Info:")
+fmt.Println(" Error :", err)
+fmt.Println(" Status Code:", resp.StatusCode())
+fmt.Println(" Status :", resp.Status())
+fmt.Println(" Proto :", resp.Proto())
+fmt.Println(" Time :", resp.Time())
+fmt.Println(" Received At:", resp.ReceivedAt())
+fmt.Println(" Body :\n", resp)
+fmt.Println()
+
+// Explore trace info
+fmt.Println("Request Trace Info:")
+ti := resp.Request.TraceInfo()
+fmt.Println(" DNSLookup :", ti.DNSLookup)
+fmt.Println(" ConnTime :", ti.ConnTime)
+fmt.Println(" TCPConnTime :", ti.TCPConnTime)
+fmt.Println(" TLSHandshake :", ti.TLSHandshake)
+fmt.Println(" ServerTime :", ti.ServerTime)
+fmt.Println(" ResponseTime :", ti.ResponseTime)
+fmt.Println(" TotalTime :", ti.TotalTime)
+fmt.Println(" IsConnReused :", ti.IsConnReused)
+fmt.Println(" IsConnWasIdle :", ti.IsConnWasIdle)
+fmt.Println(" ConnIdleTime :", ti.ConnIdleTime)
+fmt.Println(" RequestAttempt:", ti.RequestAttempt)
+fmt.Println(" RemoteAddr :", ti.RemoteAddr.String())
+
+/* Output
+Response Info:
+ Error :
+ Status Code: 200
+ Status : 200 OK
+ Proto : HTTP/2.0
+ Time : 457.034718ms
+ Received At: 2020-09-14 15:35:29.784681 -0700 PDT m=+0.458137045
+ Body :
+ {
+ "args": {},
+ "headers": {
+ "Accept-Encoding": "gzip",
+ "Host": "httpbin.org",
+ "User-Agent": "go-resty/2.4.0 (https://github.com/go-resty/resty)",
+ "X-Amzn-Trace-Id": "Root=1-5f5ff031-000ff6292204aa6898e4de49"
+ },
+ "origin": "0.0.0.0",
+ "url": "https://httpbin.org/get"
+ }
+
+Request Trace Info:
+ DNSLookup : 4.074657ms
+ ConnTime : 381.709936ms
+ TCPConnTime : 77.428048ms
+ TLSHandshake : 299.623597ms
+ ServerTime : 75.414703ms
+ ResponseTime : 79.337µs
+ TotalTime : 457.034718ms
+ IsConnReused : false
+ IsConnWasIdle : false
+ ConnIdleTime : 0s
+ RequestAttempt: 1
+ RemoteAddr : 3.221.81.55:443
+*/
+```
+
+#### Enhanced GET
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+resp, err := client.R().
+ SetQueryParams(map[string]string{
+ "page_no": "1",
+ "limit": "20",
+ "sort":"name",
+ "order": "asc",
+ "random":strconv.FormatInt(time.Now().Unix(), 10),
+ }).
+ SetHeader("Accept", "application/json").
+ SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F").
+ Get("/search_result")
+
+
+// Sample of using Request.SetQueryString method
+resp, err := client.R().
+ SetQueryString("productId=232&template=fresh-sample&cat=resty&source=google&kw=buy a lot more").
+ SetHeader("Accept", "application/json").
+ SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F").
+ Get("/show_product")
+
+
+// If necessary, you can force response content type to tell Resty to parse a JSON response into your struct
+resp, err := client.R().
+ SetResult(result).
+ ForceContentType("application/json").
+ Get("v2/alpine/manifests/latest")
+```
+
+#### Various POST method combinations
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// POST JSON string
+// No need to set content type, if you have client level setting
+resp, err := client.R().
+ SetHeader("Content-Type", "application/json").
+ SetBody(`{"username":"testuser", "password":"testpass"}`).
+ SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}).
+ Post("https://myapp.com/login")
+
+// POST []byte array
+// No need to set content type, if you have client level setting
+resp, err := client.R().
+ SetHeader("Content-Type", "application/json").
+ SetBody([]byte(`{"username":"testuser", "password":"testpass"}`)).
+ SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}).
+ Post("https://myapp.com/login")
+
+// POST Struct, default is JSON content type. No need to set one
+resp, err := client.R().
+ SetBody(User{Username: "testuser", Password: "testpass"}).
+ SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}).
+ SetError(&AuthError{}). // or SetError(AuthError{}).
+ Post("https://myapp.com/login")
+
+// POST Map, default is JSON content type. No need to set one
+resp, err := client.R().
+ SetBody(map[string]interface{}{"username": "testuser", "password": "testpass"}).
+ SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}).
+ SetError(&AuthError{}). // or SetError(AuthError{}).
+ Post("https://myapp.com/login")
+
+// POST of raw bytes for file upload. For example: upload file to Dropbox
+fileBytes, _ := ioutil.ReadFile("/Users/jeeva/mydocument.pdf")
+
+// See we are not setting content-type header, since go-resty automatically detects Content-Type for you
+resp, err := client.R().
+ SetBody(fileBytes).
+ SetContentLength(true). // Dropbox expects this value
+ SetAuthToken("").
+ SetError(&DropboxError{}). // or SetError(DropboxError{}).
+ Post("https://content.dropboxapi.com/1/files_put/auto/resty/mydocument.pdf") // for upload Dropbox supports PUT too
+
+// Note: resty detects Content-Type for request body/payload if content type header is not set.
+// * For struct and map data type defaults to 'application/json'
+// * Fallback is plain text content type
+```
+
+#### Sample PUT
+
+You can use various combinations of the `PUT` method call, as demonstrated for `POST`.
+
+```go
+// Note: This is one sample of PUT method usage, refer to POST for more combinations
+
+// Create a Resty Client
+client := resty.New()
+
+// Request goes as JSON content type
+// No need to set auth token, error, if you have client level settings
+resp, err := client.R().
+ SetBody(Article{
+ Title: "go-resty",
+ Content: "This is my article content, oh ya!",
+ Author: "Jeevanandam M",
+ Tags: []string{"article", "sample", "resty"},
+ }).
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ SetError(&Error{}). // or SetError(Error{}).
+ Put("https://myapp.com/article/1234")
+```
+
+#### Sample PATCH
+
+You can use various combinations of the `PATCH` method call, as demonstrated for `POST`.
+
+```go
+// Note: This is one sample of PATCH method usage, refer to POST for more combinations
+
+// Create a Resty Client
+client := resty.New()
+
+// Request goes as JSON content type
+// No need to set auth token, error, if you have client level settings
+resp, err := client.R().
+ SetBody(Article{
+ Tags: []string{"new tag1", "new tag2"},
+ }).
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ SetError(&Error{}). // or SetError(Error{}).
+ Patch("https://myapp.com/articles/1234")
+```
+
+#### Sample DELETE, HEAD, OPTIONS
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// DELETE an article
+// No need to set auth token, error, if you have client level settings
+resp, err := client.R().
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ SetError(&Error{}). // or SetError(Error{}).
+ Delete("https://myapp.com/articles/1234")
+
+// DELETE articles with payload/body as a JSON string
+// No need to set auth token, error, if you have client level settings
+resp, err := client.R().
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ SetError(&Error{}). // or SetError(Error{}).
+ SetHeader("Content-Type", "application/json").
+ SetBody(`{article_ids: [1002, 1006, 1007, 87683, 45432] }`).
+ Delete("https://myapp.com/articles")
+
+// HEAD of resource
+// No need to set auth token, if you have client level settings
+resp, err := client.R().
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ Head("https://myapp.com/videos/hi-res-video")
+
+// OPTIONS of resource
+// No need to set auth token, if you have client level settings
+resp, err := client.R().
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ Options("https://myapp.com/servers/nyc-dc-01")
+```
+
+#### Override JSON & XML Marshal/Unmarshal
+
+You can register your choice of JSON/XML library with resty or write your own. By default resty registers the standard `encoding/json` and `encoding/xml` packages respectively.
+```go
+// Example of registering json-iterator
+import jsoniter "github.com/json-iterator/go"
+
+json := jsoniter.ConfigCompatibleWithStandardLibrary
+
+client := resty.New()
+client.JSONMarshal = json.Marshal
+client.JSONUnmarshal = json.Unmarshal
+
+// similarly user could do for XML too with -
+client.XMLMarshal
+client.XMLUnmarshal
+```
+
+### Multipart File(s) upload
+
+#### Using io.Reader
+
+```go
+profileImgBytes, _ := ioutil.ReadFile("/Users/jeeva/test-img.png")
+notesBytes, _ := ioutil.ReadFile("/Users/jeeva/text-file.txt")
+
+// Create a Resty Client
+client := resty.New()
+
+resp, err := client.R().
+ SetFileReader("profile_img", "test-img.png", bytes.NewReader(profileImgBytes)).
+ SetFileReader("notes", "text-file.txt", bytes.NewReader(notesBytes)).
+ SetFormData(map[string]string{
+ "first_name": "Jeevanandam",
+ "last_name": "M",
+ }).
+ Post("http://myapp.com/upload")
+```
+
+#### Using File directly from Path
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Single file scenario
+resp, err := client.R().
+ SetFile("profile_img", "/Users/jeeva/test-img.png").
+ Post("http://myapp.com/upload")
+
+// Multiple files scenario
+resp, err := client.R().
+ SetFiles(map[string]string{
+ "profile_img": "/Users/jeeva/test-img.png",
+ "notes": "/Users/jeeva/text-file.txt",
+ }).
+ Post("http://myapp.com/upload")
+
+// Multipart of form fields and files
+resp, err := client.R().
+ SetFiles(map[string]string{
+ "profile_img": "/Users/jeeva/test-img.png",
+ "notes": "/Users/jeeva/text-file.txt",
+ }).
+ SetFormData(map[string]string{
+ "first_name": "Jeevanandam",
+ "last_name": "M",
+ "zip_code": "00001",
+ "city": "my city",
+ "access_token": "C6A79608-782F-4ED0-A11D-BD82FAD829CD",
+ }).
+ Post("http://myapp.com/profile")
+```
+
+#### Sample Form submission
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// just mentioning about POST as an example with simple flow
+// User Login
+resp, err := client.R().
+ SetFormData(map[string]string{
+ "username": "jeeva",
+ "password": "mypass",
+ }).
+ Post("http://myapp.com/login")
+
+// Followed by profile update
+resp, err := client.R().
+ SetFormData(map[string]string{
+ "first_name": "Jeevanandam",
+ "last_name": "M",
+ "zip_code": "00001",
+ "city": "new city update",
+ }).
+ Post("http://myapp.com/profile")
+
+// Multi value form data
+criteria := url.Values{
+ "search_criteria": []string{"book", "glass", "pencil"},
+}
+resp, err := client.R().
+ SetFormDataFromValues(criteria).
+ Post("http://myapp.com/search")
+```
+
+#### Save HTTP Response into File
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Setting the output directory path. If the directory does not exist, resty creates one!
+// This is optional if you're planning on using an absolute path in
+// `Request.SetOutput`; both can be used together.
+client.SetOutputDirectory("/Users/jeeva/Downloads")
+
+// HTTP response gets saved into file, similar to curl -o flag
+_, err := client.R().
+ SetOutput("plugin/ReplyWithHeader-v5.1-beta.zip").
+ Get("http://bit.ly/1LouEKr")
+
+// OR using absolute path
+// Note: output directory path is not used for absolute path
+_, err := client.R().
+ SetOutput("/MyDownloads/plugin/ReplyWithHeader-v5.1-beta.zip").
+ Get("http://bit.ly/1LouEKr")
+```
+
+#### Request URL Path Params
+
+Resty provides easy-to-use dynamic request URL path params. Params can be set at the client and request level. Client level param values can be overridden at the request level.
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+client.R().SetPathParams(map[string]string{
+ "userId": "sample@sample.com",
+ "subAccountId": "100002",
+}).
+Get("/v1/users/{userId}/{subAccountId}/details")
+
+// Result:
+// Composed URL - /v1/users/sample@sample.com/100002/details
+```
+
+#### Request and Response Middleware
+
+Resty provides middleware to manipulate the Request and Response. It is more flexible than a callback approach.
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Registering Request Middleware
+client.OnBeforeRequest(func(c *resty.Client, req *resty.Request) error {
+ // Now you have access to Client and current Request object
+ // manipulate it as per your need
+
+ return nil // if its success otherwise return error
+ })
+
+// Registering Response Middleware
+client.OnAfterResponse(func(c *resty.Client, resp *resty.Response) error {
+ // Now you have access to Client and current Response object
+ // manipulate it as per your need
+
+ return nil // if its success otherwise return error
+ })
+```
+
+#### OnError Hooks
+
+Resty provides OnError hooks that may be called because:
+
+- The client failed to send the request due to connection timeout, TLS handshake failure, etc...
+- The request was retried the maximum amount of times, and still failed.
+
+If there was a response from the server, the original error will be wrapped in `*resty.ResponseError` which contains the last response received.
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+client.OnError(func(req *resty.Request, err error) {
+ if v, ok := err.(*resty.ResponseError); ok {
+ // v.Response contains the last response from the server
+ // v.Err contains the original error
+ }
+ // Log the error, increment a metric, etc...
+})
+```
+
+#### Redirect Policy
+
+Resty provides a few ready-to-use redirect policies, and it also supports multiple policies together.
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Assign Client Redirect Policy. Create one as per your need
+client.SetRedirectPolicy(resty.FlexibleRedirectPolicy(15))
+
+// Wanna multiple policies such as redirect count, domain name check, etc
+client.SetRedirectPolicy(resty.FlexibleRedirectPolicy(20),
+ resty.DomainCheckRedirectPolicy("host1.com", "host2.org", "host3.net"))
+```
+
+##### Custom Redirect Policy
+
+Implement the [RedirectPolicy](redirect.go#L20) interface and register it with the resty client. Have a look at [redirect.go](redirect.go) for more information.
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Using raw func into resty.SetRedirectPolicy
+client.SetRedirectPolicy(resty.RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error {
+ // Implement your logic here
+
+ // return nil for continue redirect otherwise return error to stop/prevent redirect
+ return nil
+}))
+
+//---------------------------------------------------
+
+// Using a struct to create a more flexible redirect policy
+type CustomRedirectPolicy struct {
+ // variables go here
+}
+
+func (c *CustomRedirectPolicy) Apply(req *http.Request, via []*http.Request) error {
+ // Implement your logic here
+
+ // return nil for continue redirect otherwise return error to stop/prevent redirect
+ return nil
+}
+
+// Registering in resty
+client.SetRedirectPolicy(CustomRedirectPolicy{/* initialize variables */})
+```
+
+#### Custom Root Certificates and Client Certificates
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Custom Root certificates, just supply the .pem file.
+// You can add one or more root certificates; they get appended.
+client.SetRootCertificate("/path/to/root/pemFile1.pem")
+client.SetRootCertificate("/path/to/root/pemFile2.pem")
+// ... and so on!
+
+// Adding Client Certificates, you add one or more certificates
+// Sample for creating certificate object
+// Parsing public/private key pair from a pair of files. The files must contain PEM encoded data.
+cert1, err := tls.LoadX509KeyPair("certs/client.pem", "certs/client.key")
+if err != nil {
+ log.Fatalf("ERROR client certificate: %s", err)
+}
+// ...
+
+// You add one or more certificates
+client.SetCertificates(cert1, cert2, cert3)
+```
+
+#### Custom Root Certificates and Client Certificates from string
+
+```go
+// Custom Root certificates from strings
+// You can pass your certificates through env variables as strings
+// You can add one or more root certificates; they get appended.
+client.SetRootCertificateFromString("-----BEGIN CERTIFICATE-----content-----END CERTIFICATE-----")
+client.SetRootCertificateFromString("-----BEGIN CERTIFICATE-----content-----END CERTIFICATE-----")
+// ... and so on!
+
+// Adding Client Certificates, you add one or more certificates
+// Sample for creating certificate object
+// Parsing public/private key pair from a pair of files. The files must contain PEM encoded data.
+cert1, err := tls.X509KeyPair([]byte("-----BEGIN CERTIFICATE-----content-----END CERTIFICATE-----"), []byte("-----BEGIN CERTIFICATE-----content-----END CERTIFICATE-----"))
+if err != nil {
+ log.Fatalf("ERROR client certificate: %s", err)
+}
+// ...
+
+// You add one or more certificates
+client.SetCertificates(cert1, cert2, cert3)
+```
+
+#### Proxy Settings - Client as well as at Request Level
+
+By default, `Go` supports proxies via the environment variable `HTTP_PROXY`. Resty provides support via `SetProxy` & `RemoveProxy`.
+Choose as per your need.
+
+**Client Level Proxy** settings apply to all requests
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Setting a Proxy URL and Port
+client.SetProxy("http://proxyserver:8888")
+
+// Want to remove proxy setting
+client.RemoveProxy()
+```
+
+#### Retries
+
+Resty uses [backoff](http://www.awsarchitectureblog.com/2015/03/backoff.html)
+to increase retry intervals after each attempt.
+
+Usage example:
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Retries are configured per client
+client.
+ // Set retry count to non zero to enable retries
+ SetRetryCount(3).
+ // You can override initial retry wait time.
+ // Default is 100 milliseconds.
+ SetRetryWaitTime(5 * time.Second).
+ // MaxWaitTime can be overridden as well.
+ // Default is 2 seconds.
+ SetRetryMaxWaitTime(20 * time.Second).
+ // SetRetryAfter sets callback to calculate wait time between retries.
+ // Default (nil) implies exponential backoff with jitter
+ SetRetryAfter(func(client *resty.Client, resp *resty.Response) (time.Duration, error) {
+ return 0, errors.New("quota exceeded")
+ })
+```
+
+The above setup will result in resty retrying requests that return a non-nil error up to
+3 times, with the delay increasing after each attempt.
+
+You can optionally provide the client with [custom retry conditions](https://pkg.go.dev/github.com/go-resty/resty/v2#RetryConditionFunc):
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+client.AddRetryCondition(
+ // RetryConditionFunc type is for retry condition function
+ // input: non-nil Response OR request execution error
+ func(r *resty.Response, err error) bool {
+ return r.StatusCode() == http.StatusTooManyRequests
+ },
+)
+```
+
+The above example will make resty retry requests that end with a `429 Too Many Requests`
+status code.
+
+Multiple retry conditions can be added.
+
+It is also possible to use `resty.Backoff(...)` to get arbitrary retry scenarios
+implemented. [Reference](retry_test.go).
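+
+A minimal sketch of such a standalone backoff loop is shown below; it is not from the upstream docs and assumes the retry option helpers `Retries`, `WaitTime`, `MaxWaitTime` and `RetryConditions` exported by `retry.go`, so verify them against your resty version:
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Wrap an operation in the Backoff helper; the option helpers used below
+// are assumptions taken from retry.go and may differ between resty versions.
+err := resty.Backoff(
+    func() (*resty.Response, error) {
+        return client.R().Get("https://httpbin.org/status/500")
+    },
+    resty.Retries(3),
+    resty.WaitTime(100*time.Millisecond),
+    resty.MaxWaitTime(2*time.Second),
+    resty.RetryConditions([]resty.RetryConditionFunc{
+        // Keep retrying while the call fails or the response reports an error status
+        func(r *resty.Response, err error) bool { return err != nil || r.IsError() },
+    }),
+)
+```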
+
+#### Allow GET request with Payload
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Allow GET request with Payload. This is disabled by default.
+client.SetAllowGetMethodPayload(true)
+```
+
+#### Wanna Multiple Clients
+
+```go
+// Here you go!
+// Client 1
+client1 := resty.New()
+client1.R().Get("http://httpbin.org")
+// ...
+
+// Client 2
+client2 := resty.New()
+client2.R().Head("http://httpbin.org")
+// ...
+
+// Bend it as per your need!!!
+```
+
+#### Remaining Client Settings & its Options
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Unique settings at Client level
+//--------------------------------
+// Enable debug mode
+client.SetDebug(true)
+
+// Assign Client TLSClientConfig
+// One can set custom root-certificate. Refer: http://golang.org/pkg/crypto/tls/#example_Dial
+client.SetTLSClientConfig(&tls.Config{ RootCAs: roots })
+
+// or One can disable security check (https)
+client.SetTLSClientConfig(&tls.Config{ InsecureSkipVerify: true })
+
+// Set client timeout as per your need
+client.SetTimeout(1 * time.Minute)
+
+
+// You can override all below settings and options at request level if you want to
+//--------------------------------------------------------------------------------
+// Host URL for all request. So you can use relative URL in the request
+client.SetHostURL("http://httpbin.org")
+
+// Headers for all request
+client.SetHeader("Accept", "application/json")
+client.SetHeaders(map[string]string{
+ "Content-Type": "application/json",
+ "User-Agent": "My custom User Agent String",
+ })
+
+// Cookies for all request
+client.SetCookie(&http.Cookie{
+ Name:"go-resty",
+ Value:"This is cookie value",
+ Path: "/",
+ Domain: "sample.com",
+ MaxAge: 36000,
+ HttpOnly: true,
+ Secure: false,
+ })
+client.SetCookies(cookies)
+
+// URL query parameters for all request
+client.SetQueryParam("user_id", "00001")
+client.SetQueryParams(map[string]string{ // sample for those who prefer this manner
+ "api_key": "api-key-here",
+ "api_secret": "api-secret",
+ })
+client.R().SetQueryString("productId=232&template=fresh-sample&cat=resty&source=google&kw=buy a lot more")
+
+// Form data for all request. Typically used with POST and PUT
+client.SetFormData(map[string]string{
+ "access_token": "BC594900-518B-4F7E-AC75-BD37F019E08F",
+ })
+
+// Basic Auth for all request
+client.SetBasicAuth("myuser", "mypass")
+
+// Bearer Auth Token for all request
+client.SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F")
+
+// Enabling Content length value for all request
+client.SetContentLength(true)
+
+// Registering global Error object structure for JSON/XML request
+client.SetError(&Error{}) // or resty.SetError(Error{})
+```
+
+#### Unix Socket
+
+```go
+unixSocket := "/var/run/my_socket.sock"
+
+// Create a Go's http.Transport so we can set it in resty.
+transport := http.Transport{
+ Dial: func(_, _ string) (net.Conn, error) {
+ return net.Dial("unix", unixSocket)
+ },
+}
+
+// Create a Resty Client
+client := resty.New()
+
+// Set the previous transport that we created, set the scheme of the communication to the
+// socket and set the unixSocket as the HostURL.
+client.SetTransport(&transport).SetScheme("http").SetHostURL(unixSocket)
+
+// No need to write the host's URL on the request, just the path.
+client.R().Get("/index.html")
+```
+
+#### Bazel Support
+
+Resty can be built, tested and depended upon via [Bazel](https://bazel.build).
+For example, to run all tests:
+
+```shell
+bazel test :resty_test
+```
+
+#### Mocking http requests using [httpmock](https://github.com/jarcoal/httpmock) library
+
+In order to mock the http requests when testing your application you
+could use the `httpmock` library.
+
+When using the default resty client, you should pass the client to the library as follows:
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Get the underlying HTTP Client and set it to Mock
+httpmock.ActivateNonDefault(client.GetClient())
+```
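+
+As a follow-up sketch (the URL and JSON body are placeholders, not from the resty or httpmock docs), you could then register a responder and exercise the client against it:
+
+```go
+// Remove the registered responders and restore the transport when the test is done.
+defer httpmock.DeactivateAndReset()
+
+// Register a canned response for the URL the code under test calls.
+httpmock.RegisterResponder("GET", "https://myapp.com/articles/1234",
+    httpmock.NewStringResponder(200, `{"id": 1234, "title": "go-resty"}`))
+
+// This request is now answered by httpmock instead of the real network.
+resp, err := client.R().Get("https://myapp.com/articles/1234")
+```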
+
+A more detailed example of mocking resty http requests using ginkgo can be found [here](https://github.com/jarcoal/httpmock#ginkgo--resty-example).
+
+## Versioning
+
+Resty releases versions according to [Semantic Versioning](http://semver.org)
+
+ * Resty v2 does not use `gopkg.in` service for library versioning.
+ * Resty has been fully adapted to `go mod` capabilities since the `v1.10.0` release.
+ * The Resty v1 series used `gopkg.in` to provide versioning. `gopkg.in/resty.vX` points to the appropriate tagged versions; `X` denotes the version series number and is a stable release for production use. For e.g. `gopkg.in/resty.v0`.
+ * Development takes place on the master branch. Although the code in master should always compile and test successfully, it might break APIs. I aim to maintain backwards compatibility, but sometimes APIs and behavior might be changed to fix a bug.
+
+## Contribution
+
+I would welcome your contribution! If you find any improvement or issue you want to fix, feel free to send a pull request. I like pull requests that include test cases for the fix/enhancement. I have done my best to bring pretty good code coverage. Feel free to write tests.
+
+BTW, I'd like to know what you think about `Resty`. Kindly open an issue or send me an email; it'd mean a lot to me.
+
+## Creator
+
+[Jeevanandam M.](https://github.com/jeevatkm) (jeeva@myjeeva.com)
+
+## Core Team
+
+Have a look on [Members](https://github.com/orgs/go-resty/people) page.
+
+## Contributors
+
+Have a look on [Contributors](https://github.com/go-resty/resty/graphs/contributors) page.
+
+## License
+
+Resty is released under the MIT license; refer to the [LICENSE](LICENSE) file.
diff --git a/vendor/github.com/go-resty/resty/v2/WORKSPACE b/vendor/github.com/go-resty/resty/v2/WORKSPACE
new file mode 100644
index 00000000..9ef03e95
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/WORKSPACE
@@ -0,0 +1,31 @@
+workspace(name = "resty")
+
+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+
+http_archive(
+ name = "io_bazel_rules_go",
+ sha256 = "69de5c704a05ff37862f7e0f5534d4f479418afc21806c887db544a316f3cb6b",
+ urls = [
+ "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.27.0/rules_go-v0.27.0.tar.gz",
+ "https://github.com/bazelbuild/rules_go/releases/download/v0.27.0/rules_go-v0.27.0.tar.gz",
+ ],
+)
+
+http_archive(
+ name = "bazel_gazelle",
+ sha256 = "62ca106be173579c0a167deb23358fdfe71ffa1e4cfdddf5582af26520f1c66f",
+ urls = [
+ "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.23.0/bazel-gazelle-v0.23.0.tar.gz",
+ "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.23.0/bazel-gazelle-v0.23.0.tar.gz",
+ ],
+)
+
+load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
+
+go_rules_dependencies()
+
+go_register_toolchains(version = "1.16")
+
+load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies")
+
+gazelle_dependencies()
diff --git a/vendor/github.com/go-resty/resty/v2/client.go b/vendor/github.com/go-resty/resty/v2/client.go
new file mode 100644
index 00000000..1a03efa3
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/client.go
@@ -0,0 +1,1115 @@
+// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "bytes"
+ "compress/gzip"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/json"
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "net/http"
+ "net/url"
+ "reflect"
+ "regexp"
+ "strings"
+ "sync"
+ "time"
+)
+
+const (
+ // MethodGet HTTP method
+ MethodGet = "GET"
+
+ // MethodPost HTTP method
+ MethodPost = "POST"
+
+ // MethodPut HTTP method
+ MethodPut = "PUT"
+
+ // MethodDelete HTTP method
+ MethodDelete = "DELETE"
+
+ // MethodPatch HTTP method
+ MethodPatch = "PATCH"
+
+ // MethodHead HTTP method
+ MethodHead = "HEAD"
+
+ // MethodOptions HTTP method
+ MethodOptions = "OPTIONS"
+)
+
+var (
+ hdrUserAgentKey = http.CanonicalHeaderKey("User-Agent")
+ hdrAcceptKey = http.CanonicalHeaderKey("Accept")
+ hdrContentTypeKey = http.CanonicalHeaderKey("Content-Type")
+ hdrContentLengthKey = http.CanonicalHeaderKey("Content-Length")
+ hdrContentEncodingKey = http.CanonicalHeaderKey("Content-Encoding")
+ hdrLocationKey = http.CanonicalHeaderKey("Location")
+
+ plainTextType = "text/plain; charset=utf-8"
+ jsonContentType = "application/json"
+ formContentType = "application/x-www-form-urlencoded"
+
+ jsonCheck = regexp.MustCompile(`(?i:(application|text)/(json|.*\+json|json\-.*)(;|$))`)
+ xmlCheck = regexp.MustCompile(`(?i:(application|text)/(xml|.*\+xml)(;|$))`)
+
+ hdrUserAgentValue = "go-resty/" + Version + " (https://github.com/go-resty/resty)"
+ bufPool = &sync.Pool{New: func() interface{} { return &bytes.Buffer{} }}
+)
+
+type (
+ // RequestMiddleware type is for request middleware, called before a request is sent
+ RequestMiddleware func(*Client, *Request) error
+
+ // ResponseMiddleware type is for response middleware, called after a response has been received
+ ResponseMiddleware func(*Client, *Response) error
+
+ // PreRequestHook type is for the request hook, called right before the request is sent
+ PreRequestHook func(*Client, *http.Request) error
+
+ // RequestLogCallback type is for request logs, called before the request is logged
+ RequestLogCallback func(*RequestLog) error
+
+ // ResponseLogCallback type is for response logs, called before the response is logged
+ ResponseLogCallback func(*ResponseLog) error
+
+ // ErrorHook type is for reacting to request errors, called after all retries were attempted
+ ErrorHook func(*Request, error)
+)
+
+// Client struct is used to create Resty client with client level settings,
+// these settings are applicable to all the request raised from the client.
+//
+// Resty also provides options to override most of the client settings
+// at request level.
+type Client struct {
+ BaseURL string
+ HostURL string // Deprecated: use BaseURL instead. To be removed in v3.0.0 release.
+ QueryParam url.Values
+ FormData url.Values
+ PathParams map[string]string
+ Header http.Header
+ UserInfo *User
+ Token string
+ AuthScheme string
+ Cookies []*http.Cookie
+ Error reflect.Type
+ Debug bool
+ DisableWarn bool
+ AllowGetMethodPayload bool
+ RetryCount int
+ RetryWaitTime time.Duration
+ RetryMaxWaitTime time.Duration
+ RetryConditions []RetryConditionFunc
+ RetryHooks []OnRetryFunc
+ RetryAfter RetryAfterFunc
+ JSONMarshal func(v interface{}) ([]byte, error)
+ JSONUnmarshal func(data []byte, v interface{}) error
+ XMLMarshal func(v interface{}) ([]byte, error)
+ XMLUnmarshal func(data []byte, v interface{}) error
+
+ // HeaderAuthorizationKey is used to set/access Request Authorization header
+ // value when `SetAuthToken` option is used.
+ HeaderAuthorizationKey string
+
+ jsonEscapeHTML bool
+ setContentLength bool
+ closeConnection bool
+ notParseResponse bool
+ trace bool
+ debugBodySizeLimit int64
+ outputDirectory string
+ scheme string
+ log Logger
+ httpClient *http.Client
+ proxyURL *url.URL
+ beforeRequest []RequestMiddleware
+ udBeforeRequest []RequestMiddleware
+ preReqHook PreRequestHook
+ afterResponse []ResponseMiddleware
+ requestLog RequestLogCallback
+ responseLog ResponseLogCallback
+ errorHooks []ErrorHook
+}
+
+// User type is to hold a username and password information
+type User struct {
+ Username, Password string
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Client methods
+//___________________________________
+
+// SetHostURL method is to set Host URL in the client instance. It will be used with request
+// raised from this client with relative URL
+// // Setting HTTP address
+// client.SetHostURL("http://myjeeva.com")
+//
+// // Setting HTTPS address
+// client.SetHostURL("https://myjeeva.com")
+//
+// Deprecated: use SetBaseURL instead. To be removed in v3.0.0 release.
+func (c *Client) SetHostURL(url string) *Client {
+ c.SetBaseURL(url)
+ return c
+}
+
+// SetBaseURL method is to set Base URL in the client instance. It will be used with request
+// raised from this client with relative URL
+// // Setting HTTP address
+// client.SetBaseURL("http://myjeeva.com")
+//
+// // Setting HTTPS address
+// client.SetBaseURL("https://myjeeva.com")
+//
+// Since v2.7.0
+func (c *Client) SetBaseURL(url string) *Client {
+ c.BaseURL = strings.TrimRight(url, "/")
+ c.HostURL = c.BaseURL
+ return c
+}
+
+// SetHeader method sets a single header field and its value in the client instance.
+// These headers will be applied to all requests raised from this client instance.
+// Also it can be overridden at request level header options.
+//
+// See `Request.SetHeader` or `Request.SetHeaders`.
+//
+// For Example: To set `Content-Type` and `Accept` as `application/json`
+//
+// client.
+// SetHeader("Content-Type", "application/json").
+// SetHeader("Accept", "application/json")
+func (c *Client) SetHeader(header, value string) *Client {
+ c.Header.Set(header, value)
+ return c
+}
+
+// SetHeaders method sets multiple headers field and its values at one go in the client instance.
+// These headers will be applied to all requests raised from this client instance. Also it can be
+// overridden at request level headers options.
+//
+// See `Request.SetHeaders` or `Request.SetHeader`.
+//
+// For Example: To set `Content-Type` and `Accept` as `application/json`
+//
+// client.SetHeaders(map[string]string{
+// "Content-Type": "application/json",
+// "Accept": "application/json",
+// })
+func (c *Client) SetHeaders(headers map[string]string) *Client {
+ for h, v := range headers {
+ c.Header.Set(h, v)
+ }
+ return c
+}
+
+// SetHeaderVerbatim method is to set a single header field and its value verbatim in the current request.
+//
+// For Example: To set `all_lowercase` and `UPPERCASE` as `available`.
+// client.R().
+// SetHeaderVerbatim("all_lowercase", "available").
+// SetHeaderVerbatim("UPPERCASE", "available")
+//
+// Also you can override header value, which was set at client instance level.
+//
+// Since v2.6.0
+func (c *Client) SetHeaderVerbatim(header, value string) *Client {
+ c.Header[header] = []string{value}
+ return c
+}
+
+// SetCookieJar method sets a custom http.CookieJar in the resty client. It's a way to override the default.
+//
+// For Example: sometimes we don't want to save cookies while contacting an API, so we can remove the default
+// CookieJar in resty client.
+//
+// client.SetCookieJar(nil)
+func (c *Client) SetCookieJar(jar http.CookieJar) *Client {
+ c.httpClient.Jar = jar
+ return c
+}
+
+// SetCookie method appends a single cookie in the client instance.
+// These cookies will be added to all the request raised from this client instance.
+// client.SetCookie(&http.Cookie{
+// Name:"go-resty",
+// Value:"This is cookie value",
+// })
+func (c *Client) SetCookie(hc *http.Cookie) *Client {
+ c.Cookies = append(c.Cookies, hc)
+ return c
+}
+
+// SetCookies method sets an array of cookies in the client instance.
+// These cookies will be added to all the request raised from this client instance.
+// cookies := []*http.Cookie{
+// &http.Cookie{
+// Name:"go-resty-1",
+// Value:"This is cookie 1 value",
+// },
+// &http.Cookie{
+// Name:"go-resty-2",
+// Value:"This is cookie 2 value",
+// },
+// }
+//
+// // Setting a cookies into resty
+// client.SetCookies(cookies)
+func (c *Client) SetCookies(cs []*http.Cookie) *Client {
+ c.Cookies = append(c.Cookies, cs...)
+ return c
+}
+
+// SetQueryParam method sets single parameter and its value in the client instance.
+// It will be formed as query string for the request.
+//
+// For Example: `search=kitchen%20papers&size=large`
+// in the URL after `?` mark. These query params will be added to all the request raised from
+// this client instance. Also it can be overridden at request level Query Param options.
+//
+// See `Request.SetQueryParam` or `Request.SetQueryParams`.
+// client.
+// SetQueryParam("search", "kitchen papers").
+// SetQueryParam("size", "large")
+func (c *Client) SetQueryParam(param, value string) *Client {
+ c.QueryParam.Set(param, value)
+ return c
+}
+
+// SetQueryParams method sets multiple parameters and their values at one go in the client instance.
+// It will be formed as query string for the request.
+//
+// For Example: `search=kitchen%20papers&size=large`
+// in the URL after `?` mark. These query params will be added to all the request raised from this
+// client instance. Also it can be overridden at request level Query Param options.
+//
+// See `Request.SetQueryParams` or `Request.SetQueryParam`.
+// client.SetQueryParams(map[string]string{
+// "search": "kitchen papers",
+// "size": "large",
+// })
+func (c *Client) SetQueryParams(params map[string]string) *Client {
+ for p, v := range params {
+ c.SetQueryParam(p, v)
+ }
+ return c
+}
+
+// SetFormData method sets Form parameters and their values in the client instance.
+// It's applicable only to the HTTP methods `POST` and `PUT`, and the request content type will be set as
+// `application/x-www-form-urlencoded`. This form data will be added to all the requests raised from
+// this client instance. Also it can be overridden at request level form data.
+//
+// See `Request.SetFormData`.
+// client.SetFormData(map[string]string{
+// "access_token": "BC594900-518B-4F7E-AC75-BD37F019E08F",
+// "user_id": "3455454545",
+// })
+func (c *Client) SetFormData(data map[string]string) *Client {
+ for k, v := range data {
+ c.FormData.Set(k, v)
+ }
+ return c
+}
+
+// SetBasicAuth method sets the basic authentication header in the HTTP request. For Example:
+// Authorization: Basic
+//
+// For Example: To set the header for username "go-resty" and password "welcome"
+// client.SetBasicAuth("go-resty", "welcome")
+//
+// This basic auth information gets added to all the requests raised from this client instance.
+// It can also be overridden or set at the request level.
+//
+// See `Request.SetBasicAuth`.
+func (c *Client) SetBasicAuth(username, password string) *Client {
+ c.UserInfo = &User{Username: username, Password: password}
+ return c
+}
+
+// SetAuthToken method sets the auth token of the `Authorization` header for all HTTP requests.
+// The default auth scheme is `Bearer`, it can be customized with the method `SetAuthScheme`. For Example:
+// Authorization:
+//
+// For Example: To set auth token BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F
+//
+// client.SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F")
+//
+// This auth token gets added to all the requests raised from this client instance.
+// It can also be overridden or set at the request level.
+//
+// See `Request.SetAuthToken`.
+func (c *Client) SetAuthToken(token string) *Client {
+ c.Token = token
+ return c
+}
+
+// SetAuthScheme method sets the auth scheme type in the HTTP request. For Example:
+// Authorization:
+//
+// For Example: To set the scheme to use OAuth
+//
+// client.SetAuthScheme("OAuth")
+//
+// This auth scheme gets added to all the requests raised from this client instance.
+// It can also be overridden or set at the request level.
+//
+// Information about auth schemes can be found in RFC7235 which is linked to below
+// along with the page containing the currently defined official authentication schemes:
+// https://tools.ietf.org/html/rfc7235
+// https://www.iana.org/assignments/http-authschemes/http-authschemes.xhtml#authschemes
+//
+// See `Request.SetAuthToken`.
+func (c *Client) SetAuthScheme(scheme string) *Client {
+ c.AuthScheme = scheme
+ return c
+}
+
+// R method creates a new request instance; it's used for Get, Post, Put, Delete, Patch, Head, Options, etc.
+func (c *Client) R() *Request {
+ r := &Request{
+ QueryParam: url.Values{},
+ FormData: url.Values{},
+ Header: http.Header{},
+ Cookies: make([]*http.Cookie, 0),
+
+ client: c,
+ multipartFiles: []*File{},
+ multipartFields: []*MultipartField{},
+ PathParams: map[string]string{},
+ jsonEscapeHTML: true,
+ }
+ return r
+}
+
+// NewRequest is an alias for method `R()`. It creates a new request instance; it's used for
+// Get, Post, Put, Delete, Patch, Head, Options, etc.
+func (c *Client) NewRequest() *Request {
+ return c.R()
+}
+
+// OnBeforeRequest method appends request middleware into the before request chain.
+// It gets applied after the default Resty request middlewares and before the request
+// is sent from Resty to the host server.
+// client.OnBeforeRequest(func(c *resty.Client, r *resty.Request) error {
+// // Now you have access to Client and Request instance
+// // manipulate it as per your need
+//
+// return nil // if its success otherwise return error
+// })
+func (c *Client) OnBeforeRequest(m RequestMiddleware) *Client {
+ c.udBeforeRequest = append(c.udBeforeRequest, m)
+ return c
+}
+
+// OnAfterResponse method appends response middleware into the after response chain.
+// Once we receive a response from the host server, the default Resty response middlewares
+// get applied and then the user-assigned response middlewares are applied.
+// client.OnAfterResponse(func(c *resty.Client, r *resty.Response) error {
+// // Now you have access to Client and Response instance
+// // manipulate it as per your need
+//
+// return nil // if its success otherwise return error
+// })
+func (c *Client) OnAfterResponse(m ResponseMiddleware) *Client {
+ c.afterResponse = append(c.afterResponse, m)
+ return c
+}
+
+// OnError method adds a callback that will be run whenever a request execution fails.
+// This is called after all retries have been attempted (if any).
+// If there was a response from the server, the error will be wrapped in *ResponseError
+// which has the last response received from the server.
+//
+// client.OnError(func(req *resty.Request, err error) {
+// if v, ok := err.(*resty.ResponseError); ok {
+// // Do something with v.Response
+// }
+// // Log the error, increment a metric, etc...
+// })
+func (c *Client) OnError(h ErrorHook) *Client {
+ c.errorHooks = append(c.errorHooks, h)
+ return c
+}
+
+// SetPreRequestHook method sets the given pre-request function into resty client.
+// It is called right before the request is fired.
+//
+// Note: Only one pre-request hook can be registered. Use `client.OnBeforeRequest` for multiple.
+func (c *Client) SetPreRequestHook(h PreRequestHook) *Client {
+ if c.preReqHook != nil {
+ c.log.Warnf("Overwriting an existing pre-request hook: %s", functionName(h))
+ }
+ c.preReqHook = h
+ return c
+}
+
+// SetDebug method enables the debug mode on Resty client. Client logs details of every request and response.
+// For `Request` it logs information such as HTTP verb, Relative URL path, Host, Headers, Body if it has one.
+// For `Response` it logs information such as Status, Response Time, Headers, Body if it has one.
+// client.SetDebug(true)
+func (c *Client) SetDebug(d bool) *Client {
+ c.Debug = d
+ return c
+}
+
+// SetDebugBodyLimit sets the maximum size for which the response and request body will be logged in debug mode.
+// client.SetDebugBodyLimit(1000000)
+func (c *Client) SetDebugBodyLimit(sl int64) *Client {
+ c.debugBodySizeLimit = sl
+ return c
+}
+
+// OnRequestLog method is used to set a request log callback in Resty. The registered callback gets
+// called before resty actually logs the information.
+func (c *Client) OnRequestLog(rl RequestLogCallback) *Client {
+ if c.requestLog != nil {
+ c.log.Warnf("Overwriting an existing on-request-log callback from=%s to=%s",
+ functionName(c.requestLog), functionName(rl))
+ }
+ c.requestLog = rl
+ return c
+}
+
+// OnResponseLog method is used to set a response log callback in Resty. The registered callback gets
+// called before resty actually logs the information.
+func (c *Client) OnResponseLog(rl ResponseLogCallback) *Client {
+ if c.responseLog != nil {
+ c.log.Warnf("Overwriting an existing on-response-log callback from=%s to=%s",
+ functionName(c.responseLog), functionName(rl))
+ }
+ c.responseLog = rl
+ return c
+}
+
+// SetDisableWarn method disables the warning message on Resty client.
+//
+// For Example: Resty warns the user when BasicAuth is used in non-TLS mode.
+// client.SetDisableWarn(true)
+func (c *Client) SetDisableWarn(d bool) *Client {
+ c.DisableWarn = d
+ return c
+}
+
+// SetAllowGetMethodPayload method allows the GET method with payload on Resty client.
+//
+// For Example: Resty allows the user to send a request with a payload on the HTTP GET method.
+// client.SetAllowGetMethodPayload(true)
+func (c *Client) SetAllowGetMethodPayload(a bool) *Client {
+ c.AllowGetMethodPayload = a
+ return c
+}
+
+// SetLogger method sets given writer for logging Resty request and response details.
+//
+// Compliant to interface `resty.Logger`.
+func (c *Client) SetLogger(l Logger) *Client {
+ c.log = l
+ return c
+}
+
+// SetContentLength method enables the HTTP header `Content-Length` value for every request.
+// By default Resty won't set `Content-Length`.
+// client.SetContentLength(true)
+//
+// Also you have an option to enable for particular request. See `Request.SetContentLength`
+func (c *Client) SetContentLength(l bool) *Client {
+ c.setContentLength = l
+ return c
+}
+
+// SetTimeout method sets timeout for request raised from client.
+// client.SetTimeout(time.Duration(1 * time.Minute))
+func (c *Client) SetTimeout(timeout time.Duration) *Client {
+ c.httpClient.Timeout = timeout
+ return c
+}
+
+// SetError method is to register the global or client common `Error` object into Resty.
+// It is used for automatic unmarshalling if the response status code is greater than 399 and the
+// content type is either JSON or XML. Can be a pointer or non-pointer.
+// client.SetError(&Error{})
+// // OR
+// client.SetError(Error{})
+func (c *Client) SetError(err interface{}) *Client {
+ c.Error = typeOf(err)
+ return c
+}
+
+// SetRedirectPolicy method sets the client redirect policy. Resty provides ready-to-use
+// redirect policies. Want to create one yourself? Refer to `redirect.go`.
+//
+// client.SetRedirectPolicy(FlexibleRedirectPolicy(20))
+//
+// // Need multiple redirect policies together
+// client.SetRedirectPolicy(FlexibleRedirectPolicy(20), DomainCheckRedirectPolicy("host1.com", "host2.net"))
+func (c *Client) SetRedirectPolicy(policies ...interface{}) *Client {
+ for _, p := range policies {
+ if _, ok := p.(RedirectPolicy); !ok {
+ c.log.Errorf("%v does not implement resty.RedirectPolicy (missing Apply method)",
+ functionName(p))
+ }
+ }
+
+ c.httpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error {
+ for _, p := range policies {
+ if err := p.(RedirectPolicy).Apply(req, via); err != nil {
+ return err
+ }
+ }
+ return nil // looks good, go ahead
+ }
+
+ return c
+}
+
+// SetRetryCount method enables retry on the Resty client and allows you
+// to set the number of retries. Resty uses a backoff mechanism.
+func (c *Client) SetRetryCount(count int) *Client {
+ c.RetryCount = count
+ return c
+}
+
+// SetRetryWaitTime method sets the default wait time to sleep before retrying
+// a request.
+//
+// Default is 100 milliseconds.
+func (c *Client) SetRetryWaitTime(waitTime time.Duration) *Client {
+ c.RetryWaitTime = waitTime
+ return c
+}
+
+// SetRetryMaxWaitTime method sets the max wait time to sleep before retrying
+// a request.
+//
+// Default is 2 seconds.
+func (c *Client) SetRetryMaxWaitTime(maxWaitTime time.Duration) *Client {
+ c.RetryMaxWaitTime = maxWaitTime
+ return c
+}
+
+// SetRetryAfter sets the callback to calculate the wait time between retries.
+// Default (nil) implies exponential backoff with jitter.
+func (c *Client) SetRetryAfter(callback RetryAfterFunc) *Client {
+ c.RetryAfter = callback
+ return c
+}
+
+// AddRetryCondition method adds a retry condition function to the array of functions
+// that are checked to determine if the request is retried. The request will
+// retry if any of the functions return true and the error is nil.
+//
+// Note: These retry conditions are applied to all Requests made using this Client.
+// For Request-specific retry conditions, check *Request.AddRetryCondition.
+func (c *Client) AddRetryCondition(condition RetryConditionFunc) *Client {
+ c.RetryConditions = append(c.RetryConditions, condition)
+ return c
+}
+
+// AddRetryAfterErrorCondition adds the basic condition of retrying after encountering
+// an error from the HTTP response.
+//
+// Since v2.6.0
+func (c *Client) AddRetryAfterErrorCondition() *Client {
+ c.AddRetryCondition(func(response *Response, err error) bool {
+ return response.IsError()
+ })
+ return c
+}
+
+// AddRetryHook adds a side-effecting retry hook to an array of hooks
+// that will be executed on each retry.
+//
+// Since v2.6.0
+func (c *Client) AddRetryHook(hook OnRetryFunc) *Client {
+ c.RetryHooks = append(c.RetryHooks, hook)
+ return c
+}
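+
+// A minimal usage sketch combining the retry settings above; the values and
+// the extra status-code check are illustrative, not prescriptive.
+//
+// client := resty.New().
+// SetRetryCount(3).
+// SetRetryWaitTime(200 * time.Millisecond).
+// SetRetryMaxWaitTime(2 * time.Second).
+// AddRetryCondition(func(r *resty.Response, err error) bool {
+// return err != nil || r.StatusCode() == http.StatusTooManyRequests
+// })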
+
+// SetTLSClientConfig method sets the TLSClientConfig for the underlying client Transport.
+//
+// For Example:
+// // One can set custom root-certificate. Refer: http://golang.org/pkg/crypto/tls/#example_Dial
+// client.SetTLSClientConfig(&tls.Config{ RootCAs: roots })
+//
+// // or one can disable the security check (https)
+// client.SetTLSClientConfig(&tls.Config{ InsecureSkipVerify: true })
+//
+// Note: This method overwrites existing `TLSClientConfig`.
+func (c *Client) SetTLSClientConfig(config *tls.Config) *Client {
+ transport, err := c.transport()
+ if err != nil {
+ c.log.Errorf("%v", err)
+ return c
+ }
+ transport.TLSClientConfig = config
+ return c
+}
+
+// SetProxy method sets the Proxy URL and Port for Resty client.
+// client.SetProxy("http://proxyserver:8888")
+//
+// Alternatively, without this `SetProxy` method, you could also set the proxy via environment variables.
+//
+// Refer to godoc `http.ProxyFromEnvironment`.
+func (c *Client) SetProxy(proxyURL string) *Client {
+ transport, err := c.transport()
+ if err != nil {
+ c.log.Errorf("%v", err)
+ return c
+ }
+
+ pURL, err := url.Parse(proxyURL)
+ if err != nil {
+ c.log.Errorf("%v", err)
+ return c
+ }
+
+ c.proxyURL = pURL
+ transport.Proxy = http.ProxyURL(c.proxyURL)
+ return c
+}
+
+// RemoveProxy method removes the proxy configuration from Resty client
+// client.RemoveProxy()
+func (c *Client) RemoveProxy() *Client {
+ transport, err := c.transport()
+ if err != nil {
+ c.log.Errorf("%v", err)
+ return c
+ }
+ c.proxyURL = nil
+ transport.Proxy = nil
+ return c
+}
+
+// SetCertificates method helps to set client certificates into Resty conveniently.
+func (c *Client) SetCertificates(certs ...tls.Certificate) *Client {
+ config, err := c.tlsConfig()
+ if err != nil {
+ c.log.Errorf("%v", err)
+ return c
+ }
+ config.Certificates = append(config.Certificates, certs...)
+ return c
+}
+
+// SetRootCertificate method helps to add one or more root certificates into Resty client
+// client.SetRootCertificate("/path/to/root/pemFile.pem")
+func (c *Client) SetRootCertificate(pemFilePath string) *Client {
+ rootPemData, err := ioutil.ReadFile(pemFilePath)
+ if err != nil {
+ c.log.Errorf("%v", err)
+ return c
+ }
+
+ config, err := c.tlsConfig()
+ if err != nil {
+ c.log.Errorf("%v", err)
+ return c
+ }
+ if config.RootCAs == nil {
+ config.RootCAs = x509.NewCertPool()
+ }
+
+ config.RootCAs.AppendCertsFromPEM(rootPemData)
+ return c
+}
+
+// SetRootCertificateFromString method helps to add one or more root certificates into Resty client
+// client.SetRootCertificateFromString("pem file content")
+func (c *Client) SetRootCertificateFromString(pemContent string) *Client {
+ config, err := c.tlsConfig()
+ if err != nil {
+ c.log.Errorf("%v", err)
+ return c
+ }
+ if config.RootCAs == nil {
+ config.RootCAs = x509.NewCertPool()
+ }
+
+ config.RootCAs.AppendCertsFromPEM([]byte(pemContent))
+ return c
+}
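+
+// A short sketch combining the certificate and proxy helpers above; the file
+// paths, certificate variable and proxy address are placeholders.
+//
+// cert, _ := tls.LoadX509KeyPair("client.crt", "client.key")
+// client := resty.New().
+// SetRootCertificate("/path/to/root/pemFile.pem").
+// SetCertificates(cert).
+// SetProxy("http://proxyserver:8888")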
+
+// SetOutputDirectory method sets the output directory for saving HTTP responses into files.
+// If the output directory does not exist, Resty creates one. This setting is optional;
+// if you're planning to use absolute paths in `Request.SetOutput`, the two can be used together.
+// client.SetOutputDirectory("/save/http/response/here")
+func (c *Client) SetOutputDirectory(dirPath string) *Client {
+ c.outputDirectory = dirPath
+ return c
+}
+
+// SetTransport method sets custom `*http.Transport` or any `http.RoundTripper`
+// compatible interface implementation in the resty client.
+//
+// Note:
+//
+// - If the transport is not of type `*http.Transport`, then you may not be able to
+// take advantage of some of the Resty client settings.
+//
+// - It overwrites the Resty client transport instance and its configuration.
+//
+// transport := &http.Transport{
+// // something like proxying to httptest.Server, etc...
+// Proxy: func(req *http.Request) (*url.URL, error) {
+// return url.Parse(server.URL)
+// },
+// }
+//
+// client.SetTransport(transport)
+func (c *Client) SetTransport(transport http.RoundTripper) *Client {
+ if transport != nil {
+ c.httpClient.Transport = transport
+ }
+ return c
+}
+
+// SetScheme method sets a custom scheme in the Resty client. It's a way to override the default.
+// client.SetScheme("http")
+func (c *Client) SetScheme(scheme string) *Client {
+ if !IsStringEmpty(scheme) {
+ c.scheme = strings.TrimSpace(scheme)
+ }
+ return c
+}
+
+// SetCloseConnection method sets variable `Close` in http request struct with the given
+// value. More info: https://golang.org/src/net/http/request.go
+func (c *Client) SetCloseConnection(close bool) *Client {
+ c.closeConnection = close
+ return c
+}
+
+// SetDoNotParseResponse method instructs `Resty` not to parse the response body automatically.
+// Resty exposes the raw response body as `io.ReadCloser`. Also, do not forget to close the body,
+// otherwise you might run into connection leaks and no connection reuse.
+//
+// Note: Response middlewares are not applicable, if you use this option. Basically you have
+// taken over the control of response parsing from `Resty`.
+func (c *Client) SetDoNotParseResponse(parse bool) *Client {
+ c.notParseResponse = parse
+ return c
+}
+
+// SetPathParam method sets single URL path key-value pair in the
+// Resty client instance.
+// client.SetPathParam("userId", "sample@sample.com")
+//
+// Result:
+// URL - /v1/users/{userId}/details
+// Composed URL - /v1/users/sample@sample.com/details
+// It replaces the value of the key while composing the request URL.
+//
+// It can also be overridden at the request level via Path Params options;
+// see `Request.SetPathParam` or `Request.SetPathParams`.
+func (c *Client) SetPathParam(param, value string) *Client {
+ c.PathParams[param] = value
+ return c
+}
+
+// SetPathParams method sets multiple URL path key-value pairs at one go in the
+// Resty client instance.
+// client.SetPathParams(map[string]string{
+// "userId": "sample@sample.com",
+// "subAccountId": "100002",
+// })
+//
+// Result:
+// URL - /v1/users/{userId}/{subAccountId}/details
+// Composed URL - /v1/users/sample@sample.com/100002/details
+// It replaces the value of the key while composing the request URL.
+//
+// It can also be overridden at the request level via Path Params options;
+// see `Request.SetPathParam` or `Request.SetPathParams`.
+func (c *Client) SetPathParams(params map[string]string) *Client {
+ for p, v := range params {
+ c.SetPathParam(p, v)
+ }
+ return c
+}
+
+// SetJSONEscapeHTML method is to enable/disable the HTML escape on JSON marshal.
+//
+// Note: This option is only applicable to the standard JSON Marshaller.
+func (c *Client) SetJSONEscapeHTML(b bool) *Client {
+ c.jsonEscapeHTML = b
+ return c
+}
+
+// EnableTrace method enables the Resty client trace for the requests fired from
+// the client using `httptrace.ClientTrace` and provides insights.
+//
+// client := resty.New().EnableTrace()
+//
+// resp, err := client.R().Get("https://httpbin.org/get")
+// fmt.Println("Error:", err)
+// fmt.Println("Trace Info:", resp.Request.TraceInfo())
+//
+// `Request.EnableTrace` is also available to get trace info for a single request.
+//
+// Since v2.0.0
+func (c *Client) EnableTrace() *Client {
+ c.trace = true
+ return c
+}
+
+// DisableTrace method disables the Resty client trace. Refer to `Client.EnableTrace`.
+//
+// Since v2.0.0
+func (c *Client) DisableTrace() *Client {
+ c.trace = false
+ return c
+}
+
+// IsProxySet method returns true if a proxy is set on the Resty client, otherwise
+// false. By default the proxy is taken from the environment; refer to `http.ProxyFromEnvironment`.
+func (c *Client) IsProxySet() bool {
+ return c.proxyURL != nil
+}
+
+// GetClient method returns the current `http.Client` used by the resty client.
+func (c *Client) GetClient() *http.Client {
+ return c.httpClient
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Client Unexported methods
+//_______________________________________________________________________
+
+// execute method executes the given `Request` object and returns the response
+// or an error.
+func (c *Client) execute(req *Request) (*Response, error) {
+ // Apply Request middleware
+ var err error
+
+ // user defined on before request methods
+ // to modify the *resty.Request object
+ for _, f := range c.udBeforeRequest {
+ if err = f(c, req); err != nil {
+ return nil, wrapNoRetryErr(err)
+ }
+ }
+
+ // resty middlewares
+ for _, f := range c.beforeRequest {
+ if err = f(c, req); err != nil {
+ return nil, wrapNoRetryErr(err)
+ }
+ }
+
+ if hostHeader := req.Header.Get("Host"); hostHeader != "" {
+ req.RawRequest.Host = hostHeader
+ }
+
+ // call pre-request if defined
+ if c.preReqHook != nil {
+ if err = c.preReqHook(c, req.RawRequest); err != nil {
+ return nil, wrapNoRetryErr(err)
+ }
+ }
+
+ if err = requestLogger(c, req); err != nil {
+ return nil, wrapNoRetryErr(err)
+ }
+
+ req.RawRequest.Body = newRequestBodyReleaser(req.RawRequest.Body, req.bodyBuf)
+
+ req.Time = time.Now()
+ resp, err := c.httpClient.Do(req.RawRequest)
+
+ response := &Response{
+ Request: req,
+ RawResponse: resp,
+ }
+
+ if err != nil || req.notParseResponse || c.notParseResponse {
+ response.setReceivedAt()
+ return response, err
+ }
+
+ if !req.isSaveResponse {
+ defer closeq(resp.Body)
+ body := resp.Body
+
+ // GitHub #142 & #187
+ if strings.EqualFold(resp.Header.Get(hdrContentEncodingKey), "gzip") && resp.ContentLength != 0 {
+ if _, ok := body.(*gzip.Reader); !ok {
+ body, err = gzip.NewReader(body)
+ if err != nil {
+ response.setReceivedAt()
+ return response, err
+ }
+ defer closeq(body)
+ }
+ }
+
+ if response.body, err = ioutil.ReadAll(body); err != nil {
+ response.setReceivedAt()
+ return response, err
+ }
+
+ response.size = int64(len(response.body))
+ }
+
+ response.setReceivedAt() // after we read the body
+
+ // Apply Response middleware
+ for _, f := range c.afterResponse {
+ if err = f(c, response); err != nil {
+ break
+ }
+ }
+
+ return response, wrapNoRetryErr(err)
+}
+
+// tlsConfig returns the TLS client config, creating one if it does not already exist.
+func (c *Client) tlsConfig() (*tls.Config, error) {
+ transport, err := c.transport()
+ if err != nil {
+ return nil, err
+ }
+ if transport.TLSClientConfig == nil {
+ transport.TLSClientConfig = &tls.Config{}
+ }
+ return transport.TLSClientConfig, nil
+}
+
+// transport method returns the `*http.Transport` currently in use, or an error
+// in case the currently used `transport` is not a `*http.Transport`.
+func (c *Client) transport() (*http.Transport, error) {
+ if transport, ok := c.httpClient.Transport.(*http.Transport); ok {
+ return transport, nil
+ }
+ return nil, errors.New("current transport is not an *http.Transport instance")
+}
+
+// just an internal helper method
+func (c *Client) outputLogTo(w io.Writer) *Client {
+ c.log.(*logger).l.SetOutput(w)
+ return c
+}
+
+// ResponseError is a wrapper for including the server response with an error.
+// Neither the err nor the response should be nil.
+type ResponseError struct {
+ Response *Response
+ Err error
+}
+
+func (e *ResponseError) Error() string {
+ return e.Err.Error()
+}
+
+func (e *ResponseError) Unwrap() error {
+ return e.Err
+}
+
+// Helper to run the registered error hooks.
+// It wraps the error in a ResponseError if the resp is not nil
+// so hooks can access it.
+func (c *Client) onErrorHooks(req *Request, resp *Response, err error) {
+ if err != nil {
+ if resp != nil { // wrap with ResponseError
+ err = &ResponseError{Response: resp, Err: err}
+ }
+ for _, h := range c.errorHooks {
+ h(req, err)
+ }
+ }
+}
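+
+// A sketch of how a registered error hook can recover the server response from
+// the wrapped error; the hook body is illustrative only.
+//
+// func(req *resty.Request, err error) {
+// var re *resty.ResponseError
+// if errors.As(err, &re) {
+// // re.Response holds the server response that accompanied the error
+// }
+// }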
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// File struct and its methods
+//_______________________________________________________________________
+
+// File struct represents file information for a multipart request.
+type File struct {
+ Name string
+ ParamName string
+ io.Reader
+}
+
+// String returns the string value of the current file details.
+func (f *File) String() string {
+ return fmt.Sprintf("ParamName: %v; FileName: %v", f.ParamName, f.Name)
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// MultipartField struct
+//_______________________________________________________________________
+
+// MultipartField struct represents a custom data part for a multipart request.
+type MultipartField struct {
+ Param string
+ FileName string
+ ContentType string
+ io.Reader
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Unexported package methods
+//_______________________________________________________________________
+
+func createClient(hc *http.Client) *Client {
+ if hc.Transport == nil {
+ hc.Transport = createTransport(nil)
+ }
+
+ c := &Client{ // not setting lang default values
+ QueryParam: url.Values{},
+ FormData: url.Values{},
+ Header: http.Header{},
+ Cookies: make([]*http.Cookie, 0),
+ RetryWaitTime: defaultWaitTime,
+ RetryMaxWaitTime: defaultMaxWaitTime,
+ PathParams: make(map[string]string),
+ JSONMarshal: json.Marshal,
+ JSONUnmarshal: json.Unmarshal,
+ XMLMarshal: xml.Marshal,
+ XMLUnmarshal: xml.Unmarshal,
+ HeaderAuthorizationKey: http.CanonicalHeaderKey("Authorization"),
+
+ jsonEscapeHTML: true,
+ httpClient: hc,
+ debugBodySizeLimit: math.MaxInt32,
+ }
+
+ // Logger
+ c.SetLogger(createLogger())
+
+ // default before request middlewares
+ c.beforeRequest = []RequestMiddleware{
+ parseRequestURL,
+ parseRequestHeader,
+ parseRequestBody,
+ createHTTPRequest,
+ addCredentials,
+ }
+
+ // user defined request middlewares
+ c.udBeforeRequest = []RequestMiddleware{}
+
+ // default after response middlewares
+ c.afterResponse = []ResponseMiddleware{
+ responseLogger,
+ parseResponseBody,
+ saveResponseIntoFile,
+ }
+
+ return c
+}
diff --git a/vendor/github.com/go-resty/resty/v2/middleware.go b/vendor/github.com/go-resty/resty/v2/middleware.go
new file mode 100644
index 00000000..0e8ac2b6
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/middleware.go
@@ -0,0 +1,543 @@
+// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "mime/multipart"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "time"
+)
+
+const debugRequestLogKey = "__restyDebugRequestLog"
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Request Middleware(s)
+//_______________________________________________________________________
+
+func parseRequestURL(c *Client, r *Request) error {
+ // GitHub #103 Path Params
+ if len(r.PathParams) > 0 {
+ for p, v := range r.PathParams {
+ r.URL = strings.Replace(r.URL, "{"+p+"}", url.PathEscape(v), -1)
+ }
+ }
+ if len(c.PathParams) > 0 {
+ for p, v := range c.PathParams {
+ r.URL = strings.Replace(r.URL, "{"+p+"}", url.PathEscape(v), -1)
+ }
+ }
+
+ // Parsing request URL
+ reqURL, err := url.Parse(r.URL)
+ if err != nil {
+ return err
+ }
+
+ // If Request.URL is a relative path then c.HostURL is prepended to
+ // the request URL; otherwise Request.URL is used as-is.
+ if !reqURL.IsAbs() {
+ r.URL = reqURL.String()
+ if len(r.URL) > 0 && r.URL[0] != '/' {
+ r.URL = "/" + r.URL
+ }
+
+ reqURL, err = url.Parse(c.HostURL + r.URL)
+ if err != nil {
+ return err
+ }
+ }
+
+ // GH #407 && #318
+ if reqURL.Scheme == "" && len(c.scheme) > 0 {
+ reqURL.Scheme = c.scheme
+ }
+
+ // Adding Query Param
+ query := make(url.Values)
+ for k, v := range c.QueryParam {
+ for _, iv := range v {
+ query.Add(k, iv)
+ }
+ }
+
+ for k, v := range r.QueryParam {
+ // remove query param from client level by key
+ // since overrides happen for that key in the request
+ query.Del(k)
+
+ for _, iv := range v {
+ query.Add(k, iv)
+ }
+ }
+
+ // GitHub #123 Preserve query string order partially.
+ // This is not feasible in the `SetQuery*` resty methods, because the
+ // standard package's `url.Values.Encode()` sorts the query params
+ // alphabetically.
+ if len(query) > 0 {
+ if IsStringEmpty(reqURL.RawQuery) {
+ reqURL.RawQuery = query.Encode()
+ } else {
+ reqURL.RawQuery = reqURL.RawQuery + "&" + query.Encode()
+ }
+ }
+
+ r.URL = reqURL.String()
+
+ return nil
+}
+
+func parseRequestHeader(c *Client, r *Request) error {
+ hdr := make(http.Header)
+ for k := range c.Header {
+ hdr[k] = append(hdr[k], c.Header[k]...)
+ }
+
+ for k := range r.Header {
+ hdr.Del(k)
+ hdr[k] = append(hdr[k], r.Header[k]...)
+ }
+
+ if IsStringEmpty(hdr.Get(hdrUserAgentKey)) {
+ hdr.Set(hdrUserAgentKey, hdrUserAgentValue)
+ }
+
+ ct := hdr.Get(hdrContentTypeKey)
+ if IsStringEmpty(hdr.Get(hdrAcceptKey)) && !IsStringEmpty(ct) &&
+ (IsJSONType(ct) || IsXMLType(ct)) {
+ hdr.Set(hdrAcceptKey, hdr.Get(hdrContentTypeKey))
+ }
+
+ r.Header = hdr
+
+ return nil
+}
+
+func parseRequestBody(c *Client, r *Request) (err error) {
+ if isPayloadSupported(r.Method, c.AllowGetMethodPayload) {
+ // Handling Multipart
+ if r.isMultiPart && !(r.Method == MethodPatch) {
+ if err = handleMultipart(c, r); err != nil {
+ return
+ }
+
+ goto CL
+ }
+
+ // Handling Form Data
+ if len(c.FormData) > 0 || len(r.FormData) > 0 {
+ handleFormData(c, r)
+
+ goto CL
+ }
+
+ // Handling Request body
+ if r.Body != nil {
+ handleContentType(c, r)
+
+ if err = handleRequestBody(c, r); err != nil {
+ return
+ }
+ }
+ }
+
+CL:
+ // by default resty won't set content length, you can if you want to :)
+ if (c.setContentLength || r.setContentLength) && r.bodyBuf != nil {
+ r.Header.Set(hdrContentLengthKey, fmt.Sprintf("%d", r.bodyBuf.Len()))
+ }
+
+ return
+}
+
+func createHTTPRequest(c *Client, r *Request) (err error) {
+ if r.bodyBuf == nil {
+ if reader, ok := r.Body.(io.Reader); ok {
+ r.RawRequest, err = http.NewRequest(r.Method, r.URL, reader)
+ } else if c.setContentLength || r.setContentLength {
+ r.RawRequest, err = http.NewRequest(r.Method, r.URL, http.NoBody)
+ } else {
+ r.RawRequest, err = http.NewRequest(r.Method, r.URL, nil)
+ }
+ } else {
+ r.RawRequest, err = http.NewRequest(r.Method, r.URL, r.bodyBuf)
+ }
+
+ if err != nil {
+ return
+ }
+
+ // Assign close connection option
+ r.RawRequest.Close = c.closeConnection
+
+ // Add headers into http request
+ r.RawRequest.Header = r.Header
+
+ // Add cookies from client instance into http request
+ for _, cookie := range c.Cookies {
+ r.RawRequest.AddCookie(cookie)
+ }
+
+ // Add cookies from request instance into http request
+ for _, cookie := range r.Cookies {
+ r.RawRequest.AddCookie(cookie)
+ }
+
+ // Enable trace
+ if c.trace || r.trace {
+ r.clientTrace = &clientTrace{}
+ r.ctx = r.clientTrace.createContext(r.Context())
+ }
+
+ // Use context if it was specified
+ if r.ctx != nil {
+ r.RawRequest = r.RawRequest.WithContext(r.ctx)
+ }
+
+ bodyCopy, err := getBodyCopy(r)
+ if err != nil {
+ return err
+ }
+
+ // assign get body func for the underlying raw request instance
+ r.RawRequest.GetBody = func() (io.ReadCloser, error) {
+ if bodyCopy != nil {
+ return ioutil.NopCloser(bytes.NewReader(bodyCopy.Bytes())), nil
+ }
+ return nil, nil
+ }
+
+ return
+}
+
+func addCredentials(c *Client, r *Request) error {
+ var isBasicAuth bool
+ // Basic Auth
+ if r.UserInfo != nil { // takes precedence
+ r.RawRequest.SetBasicAuth(r.UserInfo.Username, r.UserInfo.Password)
+ isBasicAuth = true
+ } else if c.UserInfo != nil {
+ r.RawRequest.SetBasicAuth(c.UserInfo.Username, c.UserInfo.Password)
+ isBasicAuth = true
+ }
+
+ if !c.DisableWarn {
+ if isBasicAuth && !strings.HasPrefix(r.URL, "https") {
+ c.log.Warnf("Using Basic Auth in HTTP mode is not secure, use HTTPS")
+ }
+ }
+
+ // Set the Authorization Header Scheme
+ var authScheme string
+ if !IsStringEmpty(r.AuthScheme) {
+ authScheme = r.AuthScheme
+ } else if !IsStringEmpty(c.AuthScheme) {
+ authScheme = c.AuthScheme
+ } else {
+ authScheme = "Bearer"
+ }
+
+ // Build the Token Auth header
+ if !IsStringEmpty(r.Token) { // takes precedence
+ r.RawRequest.Header.Set(c.HeaderAuthorizationKey, authScheme+" "+r.Token)
+ } else if !IsStringEmpty(c.Token) {
+ r.RawRequest.Header.Set(c.HeaderAuthorizationKey, authScheme+" "+c.Token)
+ }
+
+ return nil
+}
+
+func requestLogger(c *Client, r *Request) error {
+ if c.Debug {
+ rr := r.RawRequest
+ rl := &RequestLog{Header: copyHeaders(rr.Header), Body: r.fmtBodyString(c.debugBodySizeLimit)}
+ if c.requestLog != nil {
+ if err := c.requestLog(rl); err != nil {
+ return err
+ }
+ }
+ // fmt.Sprintf("COOKIES:\n%s\n", composeCookies(c.GetClient().Jar, *rr.URL)) +
+
+ reqLog := "\n==============================================================================\n" +
+ "~~~ REQUEST ~~~\n" +
+ fmt.Sprintf("%s %s %s\n", r.Method, rr.URL.RequestURI(), rr.Proto) +
+ fmt.Sprintf("HOST : %s\n", rr.URL.Host) +
+ fmt.Sprintf("HEADERS:\n%s\n", composeHeaders(c, r, rl.Header)) +
+ fmt.Sprintf("BODY :\n%v\n", rl.Body) +
+ "------------------------------------------------------------------------------\n"
+
+ r.initValuesMap()
+ r.values[debugRequestLogKey] = reqLog
+ }
+
+ return nil
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Response Middleware(s)
+//_______________________________________________________________________
+
+func responseLogger(c *Client, res *Response) error {
+ if c.Debug {
+ rl := &ResponseLog{Header: copyHeaders(res.Header()), Body: res.fmtBodyString(c.debugBodySizeLimit)}
+ if c.responseLog != nil {
+ if err := c.responseLog(rl); err != nil {
+ return err
+ }
+ }
+
+ debugLog := res.Request.values[debugRequestLogKey].(string)
+ debugLog += "~~~ RESPONSE ~~~\n" +
+ fmt.Sprintf("STATUS : %s\n", res.Status()) +
+ fmt.Sprintf("PROTO : %s\n", res.RawResponse.Proto) +
+ fmt.Sprintf("RECEIVED AT : %v\n", res.ReceivedAt().Format(time.RFC3339Nano)) +
+ fmt.Sprintf("TIME DURATION: %v\n", res.Time()) +
+ "HEADERS :\n" +
+ composeHeaders(c, res.Request, rl.Header) + "\n"
+ if res.Request.isSaveResponse {
+ debugLog += "BODY :\n***** RESPONSE WRITTEN INTO FILE *****\n"
+ } else {
+ debugLog += fmt.Sprintf("BODY :\n%v\n", rl.Body)
+ }
+ debugLog += "==============================================================================\n"
+
+ c.log.Debugf("%s", debugLog)
+ }
+
+ return nil
+}
+
+func parseResponseBody(c *Client, res *Response) (err error) {
+ if res.StatusCode() == http.StatusNoContent {
+ return
+ }
+ // Handles only JSON or XML content type
+ ct := firstNonEmpty(res.Request.forceContentType, res.Header().Get(hdrContentTypeKey), res.Request.fallbackContentType)
+ if IsJSONType(ct) || IsXMLType(ct) {
+ // HTTP status code > 199 and < 300, considered as Result
+ if res.IsSuccess() {
+ res.Request.Error = nil
+ if res.Request.Result != nil {
+ err = Unmarshalc(c, ct, res.body, res.Request.Result)
+ return
+ }
+ }
+
+ // HTTP status code > 399, considered as Error
+ if res.IsError() {
+ // global error interface
+ if res.Request.Error == nil && c.Error != nil {
+ res.Request.Error = reflect.New(c.Error).Interface()
+ }
+
+ if res.Request.Error != nil {
+ err = Unmarshalc(c, ct, res.body, res.Request.Error)
+ }
+ }
+ }
+
+ return
+}
+
+func handleMultipart(c *Client, r *Request) (err error) {
+ r.bodyBuf = acquireBuffer()
+ w := multipart.NewWriter(r.bodyBuf)
+
+ for k, v := range c.FormData {
+ for _, iv := range v {
+ if err = w.WriteField(k, iv); err != nil {
+ return err
+ }
+ }
+ }
+
+ for k, v := range r.FormData {
+ for _, iv := range v {
+ if strings.HasPrefix(k, "@") { // file
+ err = addFile(w, k[1:], iv)
+ if err != nil {
+ return
+ }
+ } else { // form value
+ if err = w.WriteField(k, iv); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ // #21 - adding io.Reader support
+ if len(r.multipartFiles) > 0 {
+ for _, f := range r.multipartFiles {
+ err = addFileReader(w, f)
+ if err != nil {
+ return
+ }
+ }
+ }
+
+ // GitHub #130 adding multipart field support with content type
+ if len(r.multipartFields) > 0 {
+ for _, mf := range r.multipartFields {
+ if err = addMultipartFormField(w, mf); err != nil {
+ return
+ }
+ }
+ }
+
+ r.Header.Set(hdrContentTypeKey, w.FormDataContentType())
+ err = w.Close()
+
+ return
+}
+
+func handleFormData(c *Client, r *Request) {
+ formData := url.Values{}
+
+ for k, v := range c.FormData {
+ for _, iv := range v {
+ formData.Add(k, iv)
+ }
+ }
+
+ for k, v := range r.FormData {
+ // remove form data field from client level by key
+ // since overrides happen for that key in the request
+ formData.Del(k)
+
+ for _, iv := range v {
+ formData.Add(k, iv)
+ }
+ }
+
+ r.bodyBuf = bytes.NewBuffer([]byte(formData.Encode()))
+ r.Header.Set(hdrContentTypeKey, formContentType)
+ r.isFormData = true
+}
+
+func handleContentType(c *Client, r *Request) {
+ contentType := r.Header.Get(hdrContentTypeKey)
+ if IsStringEmpty(contentType) {
+ contentType = DetectContentType(r.Body)
+ r.Header.Set(hdrContentTypeKey, contentType)
+ }
+}
+
+func handleRequestBody(c *Client, r *Request) (err error) {
+ var bodyBytes []byte
+ contentType := r.Header.Get(hdrContentTypeKey)
+ kind := kindOf(r.Body)
+ r.bodyBuf = nil
+
+ if reader, ok := r.Body.(io.Reader); ok {
+ if c.setContentLength || r.setContentLength { // keep backward compatibility
+ r.bodyBuf = acquireBuffer()
+ _, err = r.bodyBuf.ReadFrom(reader)
+ r.Body = nil
+ } else {
+ // Otherwise, use bufferless processing for `io.Reader`.
+ return
+ }
+ } else if b, ok := r.Body.([]byte); ok {
+ bodyBytes = b
+ } else if s, ok := r.Body.(string); ok {
+ bodyBytes = []byte(s)
+ } else if IsJSONType(contentType) &&
+ (kind == reflect.Struct || kind == reflect.Map || kind == reflect.Slice) {
+ r.bodyBuf, err = jsonMarshal(c, r, r.Body)
+ if err != nil {
+ return
+ }
+ } else if IsXMLType(contentType) && (kind == reflect.Struct) {
+ bodyBytes, err = c.XMLMarshal(r.Body)
+ if err != nil {
+ return
+ }
+ }
+
+ if bodyBytes == nil && r.bodyBuf == nil {
+ err = errors.New("unsupported 'Body' type/value")
+ }
+
+ // if any errors during body bytes handling, return it
+ if err != nil {
+ return
+ }
+
+ // []byte into Buffer
+ if bodyBytes != nil && r.bodyBuf == nil {
+ r.bodyBuf = acquireBuffer()
+ _, _ = r.bodyBuf.Write(bodyBytes)
+ }
+
+ return
+}
+
+func saveResponseIntoFile(c *Client, res *Response) error {
+ if res.Request.isSaveResponse {
+ file := ""
+
+ if len(c.outputDirectory) > 0 && !filepath.IsAbs(res.Request.outputFile) {
+ file += c.outputDirectory + string(filepath.Separator)
+ }
+
+ file = filepath.Clean(file + res.Request.outputFile)
+ if err := createDirectory(filepath.Dir(file)); err != nil {
+ return err
+ }
+
+ outFile, err := os.Create(file)
+ if err != nil {
+ return err
+ }
+ defer closeq(outFile)
+
+ // io.Copy copies in 32kb chunks, which also works well for large file downloads
+ defer closeq(res.RawResponse.Body)
+
+ written, err := io.Copy(outFile, res.RawResponse.Body)
+ if err != nil {
+ return err
+ }
+
+ res.size = written
+ }
+
+ return nil
+}
+
+func getBodyCopy(r *Request) (*bytes.Buffer, error) {
+ // If r.bodyBuf present, return the copy
+ if r.bodyBuf != nil {
+ return bytes.NewBuffer(r.bodyBuf.Bytes()), nil
+ }
+
+ // Maybe the body is an `io.Reader`.
+ // Note: Resty users have to watch out for large body sizes with `io.Reader`.
+ if r.RawRequest.Body != nil {
+ b, err := ioutil.ReadAll(r.RawRequest.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Restore the Body
+ closeq(r.RawRequest.Body)
+ r.RawRequest.Body = ioutil.NopCloser(bytes.NewBuffer(b))
+
+ // Return the Body bytes
+ return bytes.NewBuffer(b), nil
+ }
+ return nil, nil
+}
diff --git a/vendor/github.com/go-resty/resty/v2/redirect.go b/vendor/github.com/go-resty/resty/v2/redirect.go
new file mode 100644
index 00000000..7d7e43bc
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/redirect.go
@@ -0,0 +1,101 @@
+// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "net/http"
+ "strings"
+)
+
+type (
+ // RedirectPolicy to regulate the redirects in the resty client.
+ // Objects implementing the RedirectPolicy interface can be registered with the client.
+ //
+ // The Apply function should return nil to continue the redirect journey; otherwise
+ // return an error to stop the redirect.
+ RedirectPolicy interface {
+ Apply(req *http.Request, via []*http.Request) error
+ }
+
+ // The RedirectPolicyFunc type is an adapter to allow the use of ordinary functions as RedirectPolicy.
+ // If f is a function with the appropriate signature, RedirectPolicyFunc(f) is a RedirectPolicy object that calls f.
+ RedirectPolicyFunc func(*http.Request, []*http.Request) error
+)
+
+// Apply calls f(req, via).
+func (f RedirectPolicyFunc) Apply(req *http.Request, via []*http.Request) error {
+ return f(req, via)
+}
+
+// NoRedirectPolicy is used to disable redirects in the HTTP client
+// resty.SetRedirectPolicy(NoRedirectPolicy())
+func NoRedirectPolicy() RedirectPolicy {
+ return RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error {
+ return errors.New("auto redirect is disabled")
+ })
+}
+
+// FlexibleRedirectPolicy is a convenient method to create a maximum-number-of-redirects policy for the HTTP client.
+// resty.SetRedirectPolicy(FlexibleRedirectPolicy(20))
+func FlexibleRedirectPolicy(noOfRedirect int) RedirectPolicy {
+ return RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error {
+ if len(via) >= noOfRedirect {
+ return fmt.Errorf("stopped after %d redirects", noOfRedirect)
+ }
+ checkHostAndAddHeaders(req, via[0])
+ return nil
+ })
+}
+
+// DomainCheckRedirectPolicy is a convenient method to define a domain name redirect rule in the Resty client.
+// Redirects are allowed only for the hosts mentioned in the policy.
+// resty.SetRedirectPolicy(DomainCheckRedirectPolicy("host1.com", "host2.org", "host3.net"))
+func DomainCheckRedirectPolicy(hostnames ...string) RedirectPolicy {
+ hosts := make(map[string]bool)
+ for _, h := range hostnames {
+ hosts[strings.ToLower(h)] = true
+ }
+
+ fn := RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error {
+ if ok := hosts[getHostname(req.URL.Host)]; !ok {
+ return errors.New("redirect is not allowed as per DomainCheckRedirectPolicy")
+ }
+
+ return nil
+ })
+
+ return fn
+}
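+
+// A sketch of combining the ready-made policies above on a client; the redirect
+// limit and hostnames are placeholders.
+//
+// client := resty.New().
+// SetRedirectPolicy(
+// FlexibleRedirectPolicy(10),
+// DomainCheckRedirectPolicy("host1.com", "host2.net"))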
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Package Unexported methods
+//_______________________________________________________________________
+
+func getHostname(host string) (hostname string) {
+ if strings.Index(host, ":") > 0 {
+ host, _, _ = net.SplitHostPort(host)
+ }
+ hostname = strings.ToLower(host)
+ return
+}
+
+// By default Go will not forward request headers on redirect.
+// After going through the various discussion comments in the thread
+// https://github.com/golang/go/issues/4800,
+// Resty will add all the headers during a redirect for the same host.
+func checkHostAndAddHeaders(cur *http.Request, pre *http.Request) {
+ curHostname := getHostname(cur.URL.Host)
+ preHostname := getHostname(pre.URL.Host)
+ if strings.EqualFold(curHostname, preHostname) {
+ for key, val := range pre.Header {
+ cur.Header[key] = val
+ }
+ } else { // only library User-Agent header is added
+ cur.Header.Set(hdrUserAgentKey, hdrUserAgentValue)
+ }
+}
diff --git a/vendor/github.com/go-resty/resty/v2/request.go b/vendor/github.com/go-resty/resty/v2/request.go
new file mode 100644
index 00000000..672df88c
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/request.go
@@ -0,0 +1,896 @@
+// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "reflect"
+ "strings"
+ "time"
+)
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Request struct and methods
+//_______________________________________________________________________
+
+// Request struct is used to compose and fire an individual request from the
+// Resty client. Request provides options to override client-level
+// settings and also options for request composition.
+type Request struct {
+ URL string
+ Method string
+ Token string
+ AuthScheme string
+ QueryParam url.Values
+ FormData url.Values
+ PathParams map[string]string
+ Header http.Header
+ Time time.Time
+ Body interface{}
+ Result interface{}
+ Error interface{}
+ RawRequest *http.Request
+ SRV *SRVRecord
+ UserInfo *User
+ Cookies []*http.Cookie
+
+ // Attempt is to represent the request attempt made during a Resty
+ // request execution flow, including retry count.
+ //
+ // Since v2.4.0
+ Attempt int
+
+ isMultiPart bool
+ isFormData bool
+ setContentLength bool
+ isSaveResponse bool
+ notParseResponse bool
+ jsonEscapeHTML bool
+ trace bool
+ outputFile string
+ fallbackContentType string
+ forceContentType string
+ ctx context.Context
+ values map[string]interface{}
+ client *Client
+ bodyBuf *bytes.Buffer
+ clientTrace *clientTrace
+ multipartFiles []*File
+ multipartFields []*MultipartField
+ retryConditions []RetryConditionFunc
+}
+
+// Context method returns the Context if it's already set in the request;
+// otherwise it creates a new one using `context.Background()`.
+func (r *Request) Context() context.Context {
+ if r.ctx == nil {
+ return context.Background()
+ }
+ return r.ctx
+}
+
+// SetContext method sets the context.Context for current Request. It allows
+// to interrupt the request execution if ctx.Done() channel is closed.
+// See https://blog.golang.org/context article and the "context" package
+// documentation.
+func (r *Request) SetContext(ctx context.Context) *Request {
+ r.ctx = ctx
+ return r
+}
+
+// SetHeader method is to set a single header field and its value in the current request.
+//
+// For Example: To set `Content-Type` and `Accept` as `application/json`.
+// client.R().
+// SetHeader("Content-Type", "application/json").
+// SetHeader("Accept", "application/json")
+//
+// Also you can override header value, which was set at client instance level.
+func (r *Request) SetHeader(header, value string) *Request {
+ r.Header.Set(header, value)
+ return r
+}
+
+// SetHeaders method sets multiple header fields and their values at one go in the current request.
+//
+// For Example: To set `Content-Type` and `Accept` as `application/json`
+//
+// client.R().
+// SetHeaders(map[string]string{
+// "Content-Type": "application/json",
+// "Accept": "application/json",
+// })
+// Also you can override header value, which was set at client instance level.
+func (r *Request) SetHeaders(headers map[string]string) *Request {
+ for h, v := range headers {
+ r.SetHeader(h, v)
+ }
+ return r
+}
+
+// SetHeaderMultiValues sets multiple header fields and their values as lists of strings at one go in the current request.
+//
+// For Example: To set `Accept` as `text/html, application/xhtml+xml, application/xml;q=0.9, image/webp, */*;q=0.8`
+//
+// client.R().
+// SetHeaderMultiValues(map[string][]string{
+// "Accept": []string{"text/html", "application/xhtml+xml", "application/xml;q=0.9", "image/webp", "*/*;q=0.8"},
+// })
+// Also you can override header value, which was set at client instance level.
+func (r *Request) SetHeaderMultiValues(headers map[string][]string) *Request {
+ for key, values := range headers {
+ r.SetHeader(key, strings.Join(values, ", "))
+ }
+ return r
+}
+
+// SetHeaderVerbatim method is to set a single header field and its value verbatim in the current request.
+//
+// For Example: To set `all_lowercase` and `UPPERCASE` as `available`.
+// client.R().
+// SetHeaderVerbatim("all_lowercase", "available").
+// SetHeaderVerbatim("UPPERCASE", "available")
+//
+// Also you can override header value, which was set at client instance level.
+//
+// Since v2.6.0
+func (r *Request) SetHeaderVerbatim(header, value string) *Request {
+ r.Header[header] = []string{value}
+ return r
+}
+
+// SetQueryParam method sets single parameter and its value in the current request.
+// It will be formed as query string for the request.
+//
+// For Example: `search=kitchen%20papers&size=large` in the URL after `?` mark.
+// client.R().
+// SetQueryParam("search", "kitchen papers").
+// SetQueryParam("size", "large")
+// Also you can override query params value, which was set at client instance level.
+func (r *Request) SetQueryParam(param, value string) *Request {
+ r.QueryParam.Set(param, value)
+ return r
+}
+
+// SetQueryParams method sets multiple parameters and their values at one go in the current request.
+// It will be formed as query string for the request.
+//
+// For Example: `search=kitchen%20papers&size=large` in the URL after `?` mark.
+// client.R().
+// SetQueryParams(map[string]string{
+// "search": "kitchen papers",
+// "size": "large",
+// })
+// Also you can override query params value, which was set at client instance level.
+func (r *Request) SetQueryParams(params map[string]string) *Request {
+ for p, v := range params {
+ r.SetQueryParam(p, v)
+ }
+ return r
+}
+
+// SetQueryParamsFromValues method appends multiple parameters with multi-value
+// (`url.Values`) at one go in the current request. It will be formed as
+// query string for the request.
+//
+// For Example: `status=pending&status=approved&status=open` in the URL after `?` mark.
+// client.R().
+// SetQueryParamsFromValues(url.Values{
+// "status": []string{"pending", "approved", "open"},
+// })
+// Also you can override query params value, which was set at client instance level.
+func (r *Request) SetQueryParamsFromValues(params url.Values) *Request {
+ for p, v := range params {
+ for _, pv := range v {
+ r.QueryParam.Add(p, pv)
+ }
+ }
+ return r
+}
+
+// SetQueryString method provides the ability to use a string as input to set the URL query string for the request.
+//
+// Using String as an input
+// client.R().
+// SetQueryString("productId=232&template=fresh-sample&cat=resty&source=google&kw=buy a lot more")
+func (r *Request) SetQueryString(query string) *Request {
+ params, err := url.ParseQuery(strings.TrimSpace(query))
+ if err == nil {
+ for p, v := range params {
+ for _, pv := range v {
+ r.QueryParam.Add(p, pv)
+ }
+ }
+ } else {
+ r.client.log.Errorf("%v", err)
+ }
+ return r
+}
+
+// SetFormData method sets Form parameters and their values in the current request.
+// It's applicable only to HTTP methods `POST` and `PUT`, and the request content type would be set as
+// `application/x-www-form-urlencoded`.
+// client.R().
+// SetFormData(map[string]string{
+// "access_token": "BC594900-518B-4F7E-AC75-BD37F019E08F",
+// "user_id": "3455454545",
+// })
+// Also you can override form data value, which was set at client instance level.
+func (r *Request) SetFormData(data map[string]string) *Request {
+ for k, v := range data {
+ r.FormData.Set(k, v)
+ }
+ return r
+}
+
+// SetFormDataFromValues method appends multiple form parameters with multi-value
+// (`url.Values`) at one go in the current request.
+// client.R().
+// SetFormDataFromValues(url.Values{
+// "search_criteria": []string{"book", "glass", "pencil"},
+// })
+// Also you can override form data value, which was set at client instance level.
+func (r *Request) SetFormDataFromValues(data url.Values) *Request {
+ for k, v := range data {
+ for _, kv := range v {
+ r.FormData.Add(k, kv)
+ }
+ }
+ return r
+}
+
+// SetBody method sets the request body for the request. It supports a wide range of practical needs
+// and is quite handy and powerful. Supported request body data types are `string`,
+// `[]byte`, `struct`, `map`, `slice` and `io.Reader`. The body value can be a pointer or non-pointer.
+// Marshalling is automatic for JSON and XML content types if the body is a `struct`, `map`, or `slice`.
+//
+// Note: `io.Reader` is processed as bufferless mode while sending request.
+//
+// For Example: Struct as a body input, based on content type, it will be marshalled.
+// client.R().
+// SetBody(User{
+// Username: "jeeva@myjeeva.com",
+// Password: "welcome2resty",
+// })
+//
+// Map as a body input, based on content type, it will be marshalled.
+// client.R().
+// SetBody(map[string]interface{}{
+// "username": "jeeva@myjeeva.com",
+// "password": "welcome2resty",
+// "address": &Address{
+// Address1: "1111 This is my street",
+// Address2: "Apt 201",
+// City: "My City",
+// State: "My State",
+// ZipCode: 00000,
+// },
+// })
+//
+// String as a body input. Suitable for any need as a string input.
+// client.R().
+// SetBody(`{
+// "username": "jeeva@getrightcare.com",
+// "password": "admin"
+// }`)
+//
+// []byte as a body input. Suitable for raw request such as file upload, serialize & deserialize, etc.
+// client.R().
+// SetBody([]byte("This is my raw request, sent as-is"))
+func (r *Request) SetBody(body interface{}) *Request {
+ r.Body = body
+ return r
+}
+
+// SetResult method is to register the response `Result` object for automatic unmarshalling of the request,
+// if the response status code is between 200 and 299 and the content type is either JSON or XML.
+//
+// Note: Result object can be pointer or non-pointer.
+// client.R().SetResult(&AuthToken{})
+// // OR
+// client.R().SetResult(AuthToken{})
+//
+// Accessing a result value from response instance.
+// response.Result().(*AuthToken)
+func (r *Request) SetResult(res interface{}) *Request {
+ r.Result = getPointer(res)
+ return r
+}
+
+// SetError method is to register the request `Error` object for automatic unmarshalling of the request,
+// if the response status code is greater than 399 and the content type is either JSON or XML.
+//
+// Note: Error object can be pointer or non-pointer.
+// client.R().SetError(&AuthError{})
+// // OR
+// client.R().SetError(AuthError{})
+//
+// Accessing an error value from the response instance.
+// response.Error().(*AuthError)
+func (r *Request) SetError(err interface{}) *Request {
+ r.Error = getPointer(err)
+ return r
+}
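+
+// A sketch tying SetResult and SetError together on a single call; the types
+// and the URL are placeholders.
+//
+// resp, err := client.R().
+// SetResult(&AuthToken{}).
+// SetError(&AuthError{}).
+// Get("https://example.com/login")
+// if err == nil && resp.IsSuccess() {
+// token := resp.Result().(*AuthToken)
+// ...
+// }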
+
+// SetFile method is to set a single file field name and its path for multipart upload.
+// client.R().
+// SetFile("my_file", "/Users/jeeva/Gas Bill - Sep.pdf")
+func (r *Request) SetFile(param, filePath string) *Request {
+ r.isMultiPart = true
+ r.FormData.Set("@"+param, filePath)
+ return r
+}
+
+// SetFiles method is to set multiple file field names and their paths for multipart upload.
+// client.R().
+// SetFiles(map[string]string{
+// "my_file1": "/Users/jeeva/Gas Bill - Sep.pdf",
+// "my_file2": "/Users/jeeva/Electricity Bill - Sep.pdf",
+// "my_file3": "/Users/jeeva/Water Bill - Sep.pdf",
+// })
+func (r *Request) SetFiles(files map[string]string) *Request {
+ r.isMultiPart = true
+ for f, fp := range files {
+ r.FormData.Set("@"+f, fp)
+ }
+ return r
+}
+
+// SetFileReader method is to set a single file using io.Reader for multipart upload.
+// client.R().
+// SetFileReader("profile_img", "my-profile-img.png", bytes.NewReader(profileImgBytes)).
+// SetFileReader("notes", "user-notes.txt", bytes.NewReader(notesBytes))
+func (r *Request) SetFileReader(param, fileName string, reader io.Reader) *Request {
+ r.isMultiPart = true
+ r.multipartFiles = append(r.multipartFiles, &File{
+ Name: fileName,
+ ParamName: param,
+ Reader: reader,
+ })
+ return r
+}
+
+// SetMultipartFormData method allows simple form data to be attached to the request as `multipart/form-data`.
+func (r *Request) SetMultipartFormData(data map[string]string) *Request {
+ for k, v := range data {
+ r = r.SetMultipartField(k, "", "", strings.NewReader(v))
+ }
+
+ return r
+}
+
+// SetMultipartField method is to set custom data using io.Reader for multipart upload.
+func (r *Request) SetMultipartField(param, fileName, contentType string, reader io.Reader) *Request {
+ r.isMultiPart = true
+ r.multipartFields = append(r.multipartFields, &MultipartField{
+ Param: param,
+ FileName: fileName,
+ ContentType: contentType,
+ Reader: reader,
+ })
+ return r
+}
+
+// SetMultipartFields method is to set multiple data fields using io.Reader for multipart upload.
+//
+// For Example:
+// client.R().SetMultipartFields(
+// &resty.MultipartField{
+// Param: "uploadManifest1",
+// FileName: "upload-file-1.json",
+// ContentType: "application/json",
+// Reader: strings.NewReader(`{"input": {"name": "Uploaded document 1", "_filename" : ["file1.txt"]}}`),
+// },
+// &resty.MultipartField{
+// Param: "uploadManifest2",
+// FileName: "upload-file-2.json",
+// ContentType: "application/json",
+// Reader: strings.NewReader(`{"input": {"name": "Uploaded document 2", "_filename" : ["file2.txt"]}}`),
+// })
+//
+// If you already have a slice, then simply call:
+// client.R().SetMultipartFields(fields...)
+func (r *Request) SetMultipartFields(fields ...*MultipartField) *Request {
+ r.isMultiPart = true
+ r.multipartFields = append(r.multipartFields, fields...)
+ return r
+}
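+
+// A sketch of a mixed multipart upload using the helpers above; paths, field
+// names and the reader contents are placeholders.
+//
+// resp, err := client.R().
+// SetFile("report", "/path/to/report.pdf").
+// SetFileReader("avatar", "avatar.png", bytes.NewReader(imgBytes)).
+// SetMultipartFormData(map[string]string{"note": "quarterly report"}).
+// Post("https://example.com/upload")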
+
+// SetContentLength method sets the HTTP header `Content-Length` value for the current request.
+// By default Resty won't set `Content-Length`. You also have an option to enable it for every
+// request.
+//
+// See `Client.SetContentLength`
+// client.R().SetContentLength(true)
+func (r *Request) SetContentLength(l bool) *Request {
+ r.setContentLength = l
+ return r
+}
+
+// SetBasicAuth method sets the basic authentication header in the current HTTP request.
+//
+// For Example:
+// Authorization: Basic <base64-encoded-value>
+//
+// To set the header for username "go-resty" and password "welcome"
+// client.R().SetBasicAuth("go-resty", "welcome")
+//
+// This method overrides the credentials set by method `Client.SetBasicAuth`.
+func (r *Request) SetBasicAuth(username, password string) *Request {
+ r.UserInfo = &User{Username: username, Password: password}
+ return r
+}
+
+// SetAuthToken method sets the auth token header (Default Scheme: Bearer) in the current HTTP request. Header example:
+// Authorization: Bearer <auth-token-value>
+//
+// For Example: To set auth token BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F
+//
+// client.R().SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F")
+//
+// This method overrides the Auth token set by method `Client.SetAuthToken`.
+func (r *Request) SetAuthToken(token string) *Request {
+ r.Token = token
+ return r
+}
+
+// SetAuthScheme method sets the auth token scheme type in the HTTP request. For Example:
+// Authorization: <auth-scheme-value> <auth-token-value>
+//
+// For Example: To set the scheme to use OAuth
+//
+// client.R().SetAuthScheme("OAuth")
+//
+// This auth header scheme gets added to all the requests raised from this client instance.
+// It can also be overridden or set at the request level.
+//
+// Information about Auth schemes can be found in RFC7235 which is linked to below along with the page containing
+// the currently defined official authentication schemes:
+// https://tools.ietf.org/html/rfc7235
+// https://www.iana.org/assignments/http-authschemes/http-authschemes.xhtml#authschemes
+//
+// This method overrides the Authorization scheme set by method `Client.SetAuthScheme`.
+func (r *Request) SetAuthScheme(scheme string) *Request {
+ r.AuthScheme = scheme
+ return r
+}
+
+// SetOutput method sets the output file for the current HTTP request. The current HTTP response will be
+// saved into the given file. It is similar to the `curl -o` flag. An absolute or relative path can be used.
+// If it is a relative path, then the output file goes under the output directory, as set
+// by `Client.SetOutputDirectory`.
+// client.R().
+// SetOutput("/Users/jeeva/Downloads/ReplyWithHeader-v5.1-beta.zip").
+// Get("http://bit.ly/1LouEKr")
+//
+// Note: In this scenario `Response.Body` might be nil.
+func (r *Request) SetOutput(file string) *Request {
+ r.outputFile = file
+ r.isSaveResponse = true
+ return r
+}
+
+// SetSRV method sets the details to query the service SRV record and execute the
+// request.
+// client.R().
+// SetSRV(SRVRecord{"web", "testservice.com"}).
+// Get("/get")
+func (r *Request) SetSRV(srv *SRVRecord) *Request {
+ r.SRV = srv
+ return r
+}
+
+// SetDoNotParseResponse method instructs `Resty` not to parse the response body automatically.
+// Resty exposes the raw response body as `io.ReadCloser`. Also, do not forget to close the body,
+// otherwise you might run into connection leaks and no connection reuse.
+//
+// Note: Response middlewares are not applicable, if you use this option. Basically you have
+// taken over the control of response parsing from `Resty`.
+func (r *Request) SetDoNotParseResponse(parse bool) *Request {
+ r.notParseResponse = parse
+ return r
+}
+
+// SetPathParam method sets single URL path key-value pair in the
+// Resty current request instance.
+// client.R().SetPathParam("userId", "sample@sample.com")
+//
+// Result:
+// URL - /v1/users/{userId}/details
+// Composed URL - /v1/users/sample@sample.com/details
+// It replaces the value of the key while composing the request URL. You can also
+// override Path Params values that were set at the client instance level.
+func (r *Request) SetPathParam(param, value string) *Request {
+ r.PathParams[param] = value
+ return r
+}
+
+// SetPathParams method sets multiple URL path key-value pairs at one go in the
+// Resty current request instance.
+// client.R().SetPathParams(map[string]string{
+// "userId": "sample@sample.com",
+// "subAccountId": "100002",
+// })
+//
+// Result:
+// URL - /v1/users/{userId}/{subAccountId}/details
+// Composed URL - /v1/users/sample@sample.com/100002/details
+// It replaces the value of the key while composing the request URL. You can also
+// override Path Params values that were set at the client instance level.
+func (r *Request) SetPathParams(params map[string]string) *Request {
+ for p, v := range params {
+ r.SetPathParam(p, v)
+ }
+ return r
+}
+
+// ExpectContentType method allows providing a fallback `Content-Type` for automatic unmarshalling
+// when `Content-Type` response header is unavailable.
+func (r *Request) ExpectContentType(contentType string) *Request {
+ r.fallbackContentType = contentType
+ return r
+}
+
+// ForceContentType method provides a strong sense of response `Content-Type` for automatic unmarshalling.
+// Resty gives this a higher priority than the `Content-Type` response header. This means that if both
+// `Request.ForceContentType` is set and the response `Content-Type` is available, `ForceContentType` will win.
+func (r *Request) ForceContentType(contentType string) *Request {
+ r.forceContentType = contentType
+ return r
+}
+
+// SetJSONEscapeHTML method is to enable/disable the HTML escape on JSON marshal.
+//
+// Note: This option is only applicable to the standard JSON Marshaller.
+func (r *Request) SetJSONEscapeHTML(b bool) *Request {
+ r.jsonEscapeHTML = b
+ return r
+}
+
+// SetCookie method appends a single cookie in the current request instance.
+// client.R().SetCookie(&http.Cookie{
+// Name:"go-resty",
+// Value:"This is cookie value",
+// })
+//
+// Note: The method appends the Cookie value to the existing Cookies, if any.
+//
+// Since v2.1.0
+func (r *Request) SetCookie(hc *http.Cookie) *Request {
+ r.Cookies = append(r.Cookies, hc)
+ return r
+}
+
+// SetCookies method sets an array of cookies in the current request instance.
+// cookies := []*http.Cookie{
+// &http.Cookie{
+// Name:"go-resty-1",
+// Value:"This is cookie 1 value",
+// },
+// &http.Cookie{
+// Name:"go-resty-2",
+// Value:"This is cookie 2 value",
+// },
+// }
+//
+// // Setting cookies in resty's current request
+// client.R().SetCookies(cookies)
+//
+// Note: The method appends the Cookie values to the existing Cookies, if any.
+//
+// Since v2.1.0
+func (r *Request) SetCookies(rs []*http.Cookie) *Request {
+ r.Cookies = append(r.Cookies, rs...)
+ return r
+}
+
+// AddRetryCondition method adds a retry condition function to the request's
+// array of functions that are checked to determine if the request is retried.
+// The request will retry if any of the functions return true and error is nil.
+//
+// Note: These retry conditions are checked before all retry conditions of the client.
+//
+// Since v2.7.0
+func (r *Request) AddRetryCondition(condition RetryConditionFunc) *Request {
+ r.retryConditions = append(r.retryConditions, condition)
+ return r
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// HTTP request tracing
+//_______________________________________________________________________
+
+// EnableTrace method enables trace for the current request
+// using `httptrace.ClientTrace` and provides insights.
+//
+// client := resty.New()
+//
+// resp, err := client.R().EnableTrace().Get("https://httpbin.org/get")
+// fmt.Println("Error:", err)
+// fmt.Println("Trace Info:", resp.Request.TraceInfo())
+//
+// `Client.EnableTrace` is also available to get trace info for all requests.
+//
+// Since v2.0.0
+func (r *Request) EnableTrace() *Request {
+ r.trace = true
+ return r
+}
+
+// TraceInfo method returns the trace info for the request.
+// If neither the Client nor the Request EnableTrace function has been called
+// prior to the request being made, an empty TraceInfo object is returned.
+//
+// Since v2.0.0
+func (r *Request) TraceInfo() TraceInfo {
+ ct := r.clientTrace
+
+ if ct == nil {
+ return TraceInfo{}
+ }
+
+ ti := TraceInfo{
+ DNSLookup: ct.dnsDone.Sub(ct.dnsStart),
+ TLSHandshake: ct.tlsHandshakeDone.Sub(ct.tlsHandshakeStart),
+ ServerTime: ct.gotFirstResponseByte.Sub(ct.gotConn),
+ IsConnReused: ct.gotConnInfo.Reused,
+ IsConnWasIdle: ct.gotConnInfo.WasIdle,
+ ConnIdleTime: ct.gotConnInfo.IdleTime,
+ RequestAttempt: r.Attempt,
+ }
+
+ // Calculate the total time accordingly,
+ // when connection is reused
+ if ct.gotConnInfo.Reused {
+ ti.TotalTime = ct.endTime.Sub(ct.getConn)
+ } else {
+ ti.TotalTime = ct.endTime.Sub(ct.dnsStart)
+ }
+
+ // Only calculate on successful connections
+ if !ct.connectDone.IsZero() {
+ ti.TCPConnTime = ct.connectDone.Sub(ct.dnsDone)
+ }
+
+ // Only calculate on successful connections
+ if !ct.gotConn.IsZero() {
+ ti.ConnTime = ct.gotConn.Sub(ct.getConn)
+ }
+
+ // Only calculate on successful connections
+ if !ct.gotFirstResponseByte.IsZero() {
+ ti.ResponseTime = ct.endTime.Sub(ct.gotFirstResponseByte)
+ }
+
+ // Capture remote address info when connection is non-nil
+ if ct.gotConnInfo.Conn != nil {
+ ti.RemoteAddr = ct.gotConnInfo.Conn.RemoteAddr()
+ }
+
+ return ti
+}
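+
+// A sketch of reading a few TraceInfo fields after a traced request; the URL is
+// a placeholder.
+//
+// resp, err := client.R().EnableTrace().Get("https://httpbin.org/get")
+// if err == nil {
+// ti := resp.Request.TraceInfo()
+// fmt.Println("DNS:", ti.DNSLookup, "TLS:", ti.TLSHandshake, "Total:", ti.TotalTime)
+// }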
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// HTTP verb method starts here
+//_______________________________________________________________________
+
+// Get method does GET HTTP request. It's defined in section 4.3.1 of RFC7231.
+func (r *Request) Get(url string) (*Response, error) {
+ return r.Execute(MethodGet, url)
+}
+
+// Head method does HEAD HTTP request. It's defined in section 4.3.2 of RFC7231.
+func (r *Request) Head(url string) (*Response, error) {
+ return r.Execute(MethodHead, url)
+}
+
+// Post method does POST HTTP request. It's defined in section 4.3.3 of RFC7231.
+func (r *Request) Post(url string) (*Response, error) {
+ return r.Execute(MethodPost, url)
+}
+
+// Put method does PUT HTTP request. It's defined in section 4.3.4 of RFC7231.
+func (r *Request) Put(url string) (*Response, error) {
+ return r.Execute(MethodPut, url)
+}
+
+// Delete method does DELETE HTTP request. It's defined in section 4.3.5 of RFC7231.
+func (r *Request) Delete(url string) (*Response, error) {
+ return r.Execute(MethodDelete, url)
+}
+
+// Options method does OPTIONS HTTP request. It's defined in section 4.3.7 of RFC7231.
+func (r *Request) Options(url string) (*Response, error) {
+ return r.Execute(MethodOptions, url)
+}
+
+// Patch method does PATCH HTTP request. It's defined in section 2 of RFC5789.
+func (r *Request) Patch(url string) (*Response, error) {
+ return r.Execute(MethodPatch, url)
+}
+
+// Send method performs the HTTP request using the method and URL already defined
+// for the current `Request`.
+// req := client.R()
+// req.Method = resty.GET
+// req.URL = "http://httpbin.org/get"
+// resp, err := req.Send()
+func (r *Request) Send() (*Response, error) {
+ return r.Execute(r.Method, r.URL)
+}
+
+// Execute method performs the HTTP request with the given HTTP method and URL
+// for the current `Request`.
+// resp, err := client.R().Execute(resty.GET, "http://httpbin.org/get")
+func (r *Request) Execute(method, url string) (*Response, error) {
+ var addrs []*net.SRV
+ var resp *Response
+ var err error
+
+ if r.isMultiPart && !(method == MethodPost || method == MethodPut || method == MethodPatch) {
+ // No OnError hook here since this is a request validation error
+ return nil, fmt.Errorf("multipart content is not allowed in HTTP verb [%v]", method)
+ }
+
+ if r.SRV != nil {
+ _, addrs, err = net.LookupSRV(r.SRV.Service, "tcp", r.SRV.Domain)
+ if err != nil {
+ r.client.onErrorHooks(r, nil, err)
+ return nil, err
+ }
+ }
+
+ r.Method = method
+ r.URL = r.selectAddr(addrs, url, 0)
+
+ if r.client.RetryCount == 0 {
+ r.Attempt = 1
+ resp, err = r.client.execute(r)
+ r.client.onErrorHooks(r, resp, unwrapNoRetryErr(err))
+ return resp, unwrapNoRetryErr(err)
+ }
+
+ err = Backoff(
+ func() (*Response, error) {
+ r.Attempt++
+
+ r.URL = r.selectAddr(addrs, url, r.Attempt)
+
+ resp, err = r.client.execute(r)
+ if err != nil {
+ r.client.log.Errorf("%v, Attempt %v", err, r.Attempt)
+ }
+
+ return resp, err
+ },
+ Retries(r.client.RetryCount),
+ WaitTime(r.client.RetryWaitTime),
+ MaxWaitTime(r.client.RetryMaxWaitTime),
+ RetryConditions(append(r.retryConditions, r.client.RetryConditions...)),
+ RetryHooks(r.client.RetryHooks),
+ )
+
+ r.client.onErrorHooks(r, resp, unwrapNoRetryErr(err))
+
+ return resp, unwrapNoRetryErr(err)
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// SRVRecord struct
+//_______________________________________________________________________
+
+// SRVRecord struct holds the data to query the SRV record for the
+// following service.
+type SRVRecord struct {
+ Service string
+ Domain string
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Request Unexported methods
+//_______________________________________________________________________
+
+func (r *Request) fmtBodyString(sl int64) (body string) {
+ body = "***** NO CONTENT *****"
+ if !isPayloadSupported(r.Method, r.client.AllowGetMethodPayload) {
+ return
+ }
+
+ if _, ok := r.Body.(io.Reader); ok {
+ body = "***** BODY IS io.Reader *****"
+ return
+ }
+
+ // multipart or form-data
+ if r.isMultiPart || r.isFormData {
+ bodySize := int64(r.bodyBuf.Len())
+ if bodySize > sl {
+ body = fmt.Sprintf("***** REQUEST TOO LARGE (size - %d) *****", bodySize)
+ return
+ }
+ body = r.bodyBuf.String()
+ return
+ }
+
+ // request body data
+ if r.Body == nil {
+ return
+ }
+ var prtBodyBytes []byte
+ var err error
+
+ contentType := r.Header.Get(hdrContentTypeKey)
+ kind := kindOf(r.Body)
+ if canJSONMarshal(contentType, kind) {
+ prtBodyBytes, err = json.MarshalIndent(&r.Body, "", " ")
+ } else if IsXMLType(contentType) && (kind == reflect.Struct) {
+ prtBodyBytes, err = xml.MarshalIndent(&r.Body, "", " ")
+ } else if b, ok := r.Body.(string); ok {
+ if IsJSONType(contentType) {
+ bodyBytes := []byte(b)
+ out := acquireBuffer()
+ defer releaseBuffer(out)
+ if err = json.Indent(out, bodyBytes, "", " "); err == nil {
+ prtBodyBytes = out.Bytes()
+ }
+ } else {
+ body = b
+ }
+ } else if b, ok := r.Body.([]byte); ok {
+ body = fmt.Sprintf("***** BODY IS byte(s) (size - %d) *****", len(b))
+ return
+ }
+
+ if prtBodyBytes != nil && err == nil {
+ body = string(prtBodyBytes)
+ }
+
+ if len(body) > 0 {
+ bodySize := int64(len([]byte(body)))
+ if bodySize > sl {
+ body = fmt.Sprintf("***** REQUEST TOO LARGE (size - %d) *****", bodySize)
+ }
+ }
+
+ return
+}
+
+func (r *Request) selectAddr(addrs []*net.SRV, path string, attempt int) string {
+ if addrs == nil {
+ return path
+ }
+
+ idx := attempt % len(addrs)
+ domain := strings.TrimRight(addrs[idx].Target, ".")
+ path = strings.TrimLeft(path, "/")
+
+ return fmt.Sprintf("%s://%s:%d/%s", r.client.scheme, domain, addrs[idx].Port, path)
+}
+
+func (r *Request) initValuesMap() {
+ if r.values == nil {
+ r.values = make(map[string]interface{})
+ }
+}
+
+var noescapeJSONMarshal = func(v interface{}) (*bytes.Buffer, error) {
+ buf := acquireBuffer()
+ encoder := json.NewEncoder(buf)
+ encoder.SetEscapeHTML(false)
+ if err := encoder.Encode(v); err != nil {
+ releaseBuffer(buf)
+ return nil, err
+ }
+
+ return buf, nil
+}
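
A minimal usage sketch of the request-level API above, combining a per-request retry condition with tracing. The endpoint URL and the retry count of 2 are illustrative assumptions, not something the vendored code prescribes:

package main

import (
    "fmt"
    "net/http"

    "github.com/go-resty/resty/v2"
)

func main() {
    // A non-zero client retry count sends Execute down the Backoff path.
    client := resty.New().SetRetryCount(2) // assumed retry count

    resp, err := client.R().
        EnableTrace().
        // Per-request condition: retry only on 429 Too Many Requests.
        AddRetryCondition(func(r *resty.Response, err error) bool {
            return r != nil && r.StatusCode() == http.StatusTooManyRequests
        }).
        Get("https://httpbin.org/get") // illustrative endpoint

    fmt.Println("Error:", err)
    if resp != nil {
        fmt.Println("Status:", resp.Status())
        fmt.Println("Total time:", resp.Request.TraceInfo().TotalTime)
    }
}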
diff --git a/vendor/github.com/go-resty/resty/v2/response.go b/vendor/github.com/go-resty/resty/v2/response.go
new file mode 100644
index 00000000..8ae0e10b
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/response.go
@@ -0,0 +1,175 @@
+// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+ "time"
+)
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Response struct and methods
+//_______________________________________________________________________
+
+// Response struct holds response values of executed request.
+type Response struct {
+ Request *Request
+ RawResponse *http.Response
+
+ body []byte
+ size int64
+ receivedAt time.Time
+}
+
+// Body method returns the HTTP response body as a []byte for the executed request.
+//
+// Note: `Response.Body` might be nil, if `Request.SetOutput` is used.
+func (r *Response) Body() []byte {
+ if r.RawResponse == nil {
+ return []byte{}
+ }
+ return r.body
+}
+
+// Status method returns the HTTP status string for the executed request.
+// Example: 200 OK
+func (r *Response) Status() string {
+ if r.RawResponse == nil {
+ return ""
+ }
+ return r.RawResponse.Status
+}
+
+// StatusCode method returns the HTTP status code for the executed request.
+// Example: 200
+func (r *Response) StatusCode() int {
+ if r.RawResponse == nil {
+ return 0
+ }
+ return r.RawResponse.StatusCode
+}
+
+// Proto method returns the HTTP response protocol used for the request.
+func (r *Response) Proto() string {
+ if r.RawResponse == nil {
+ return ""
+ }
+ return r.RawResponse.Proto
+}
+
+// Result method returns the response value as an object if it has one
+func (r *Response) Result() interface{} {
+ return r.Request.Result
+}
+
+// Error method returns the error object if it has one
+func (r *Response) Error() interface{} {
+ return r.Request.Error
+}
+
+// Header method returns the response headers
+func (r *Response) Header() http.Header {
+ if r.RawResponse == nil {
+ return http.Header{}
+ }
+ return r.RawResponse.Header
+}
+
+// Cookies method returns all the response cookies.
+func (r *Response) Cookies() []*http.Cookie {
+ if r.RawResponse == nil {
+ return make([]*http.Cookie, 0)
+ }
+ return r.RawResponse.Cookies()
+}
+
+// String method returns the body of the server response as String.
+func (r *Response) String() string {
+ if r.body == nil {
+ return ""
+ }
+ return strings.TrimSpace(string(r.body))
+}
+
+// Time method returns the duration from when the request was sent until the response was received.
+//
+// See `Response.ReceivedAt` to know when the client received the response and `Response.Request.Time` to know
+// when the client sent the request.
+func (r *Response) Time() time.Duration {
+ if r.Request.clientTrace != nil {
+ return r.Request.TraceInfo().TotalTime
+ }
+ return r.receivedAt.Sub(r.Request.Time)
+}
+
+// ReceivedAt method returns the time at which the response was received from the server.
+func (r *Response) ReceivedAt() time.Time {
+ return r.receivedAt
+}
+
+// Size method returns the HTTP response size in bytes. You could rely on the HTTP `Content-Length` header,
+// but it is unreliable for chunked transfer or compressed responses. Resty calculates the response size
+// on the client side, so you get the actual size of the HTTP response.
+func (r *Response) Size() int64 {
+ return r.size
+}
+
+// RawBody method exposes the raw HTTP response body. Use this method in conjunction with the `SetDoNotParseResponse`
+// option, otherwise you get an error such as `read err: http: read on closed response body`.
+//
+// Do not forget to close the body, otherwise you risk connection leaks and lose connection reuse.
+// By using this method you take over control of response parsing from `Resty`.
+func (r *Response) RawBody() io.ReadCloser {
+ if r.RawResponse == nil {
+ return nil
+ }
+ return r.RawResponse.Body
+}
+
+// IsSuccess method returns true if HTTP status `code >= 200 and <= 299` otherwise false.
+func (r *Response) IsSuccess() bool {
+ return r.StatusCode() > 199 && r.StatusCode() < 300
+}
+
+// IsError method returns true if HTTP status `code >= 400` otherwise false.
+func (r *Response) IsError() bool {
+ return r.StatusCode() > 399
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Response Unexported methods
+//_______________________________________________________________________
+
+func (r *Response) setReceivedAt() {
+ r.receivedAt = time.Now()
+ if r.Request.clientTrace != nil {
+ r.Request.clientTrace.endTime = r.receivedAt
+ }
+}
+
+func (r *Response) fmtBodyString(sl int64) string {
+ if r.body != nil {
+ if int64(len(r.body)) > sl {
+ return fmt.Sprintf("***** RESPONSE TOO LARGE (size - %d) *****", len(r.body))
+ }
+ ct := r.Header().Get(hdrContentTypeKey)
+ if IsJSONType(ct) {
+ out := acquireBuffer()
+ defer releaseBuffer(out)
+ err := json.Indent(out, r.body, "", " ")
+ if err != nil {
+ return fmt.Sprintf("*** Error: Unable to format response body - \"%s\" ***\n\nLog Body as-is:\n%s", err, r.String())
+ }
+ return out.String()
+ }
+ return r.String()
+ }
+
+ return "***** NO CONTENT *****"
+}
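
The response helpers above are typically used together once a request completes. A minimal sketch, assuming an illustrative endpoint URL:

package main

import (
    "fmt"

    "github.com/go-resty/resty/v2"
)

func main() {
    resp, err := resty.New().R().Get("https://httpbin.org/get") // illustrative endpoint
    if err != nil {
        fmt.Println("request failed:", err)
        return
    }

    // The status helpers guard against a nil RawResponse internally.
    fmt.Println("success:", resp.IsSuccess(), "code:", resp.StatusCode())
    fmt.Println("size (bytes):", resp.Size(), "took:", resp.Time())
    fmt.Println("body:", resp.String())
}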
diff --git a/vendor/github.com/go-resty/resty/v2/resty.go b/vendor/github.com/go-resty/resty/v2/resty.go
new file mode 100644
index 00000000..6f9c8b4c
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/resty.go
@@ -0,0 +1,40 @@
+// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+// Package resty provides Simple HTTP and REST client library for Go.
+package resty
+
+import (
+ "net"
+ "net/http"
+ "net/http/cookiejar"
+
+ "golang.org/x/net/publicsuffix"
+)
+
+// Version # of resty
+const Version = "2.7.0"
+
+// New method creates a new Resty client.
+func New() *Client {
+ cookieJar, _ := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
+ return createClient(&http.Client{
+ Jar: cookieJar,
+ })
+}
+
+// NewWithClient method creates a new Resty client with given `http.Client`.
+func NewWithClient(hc *http.Client) *Client {
+ return createClient(hc)
+}
+
+// NewWithLocalAddr method creates a new Resty client with given Local Address
+// to dial from.
+func NewWithLocalAddr(localAddr net.Addr) *Client {
+ cookieJar, _ := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
+ return createClient(&http.Client{
+ Jar: cookieJar,
+ Transport: createTransport(localAddr),
+ })
+}
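
New, NewWithClient, and NewWithLocalAddr differ only in how the underlying *http.Client is assembled. A short sketch of the first two; the 10-second timeout is an assumption, not a resty default:

package main

import (
    "net/http"
    "time"

    "github.com/go-resty/resty/v2"
)

func main() {
    // Default constructor: cookie jar backed by the public suffix list.
    defaultClient := resty.New()
    _ = defaultClient

    // Bring-your-own http.Client, e.g. to enforce an overall request timeout.
    hc := &http.Client{Timeout: 10 * time.Second} // assumed timeout value
    customClient := resty.NewWithClient(hc)
    _ = customClient
}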
diff --git a/vendor/github.com/go-resty/resty/v2/retry.go b/vendor/github.com/go-resty/resty/v2/retry.go
new file mode 100644
index 00000000..00b8514a
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/retry.go
@@ -0,0 +1,221 @@
+// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "context"
+ "math"
+ "math/rand"
+ "sync"
+ "time"
+)
+
+const (
+ defaultMaxRetries = 3
+ defaultWaitTime = time.Duration(100) * time.Millisecond
+ defaultMaxWaitTime = time.Duration(2000) * time.Millisecond
+)
+
+type (
+ // Option is to create convenient retry options like wait time, max retries, etc.
+ Option func(*Options)
+
+ // RetryConditionFunc type is for retry condition function
+ // input: non-nil Response OR request execution error
+ RetryConditionFunc func(*Response, error) bool
+
+ // OnRetryFunc is for side-effecting functions triggered on retry
+ OnRetryFunc func(*Response, error)
+
+ // RetryAfterFunc returns time to wait before retry
+ // For example, it can parse HTTP Retry-After header
+ // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+ // A non-nil error is returned if the request is found to be not retryable.
+ // (0, nil) is a special result that means 'use the default algorithm'.
+ RetryAfterFunc func(*Client, *Response) (time.Duration, error)
+
+ // Options struct is used to hold retry settings.
+ Options struct {
+ maxRetries int
+ waitTime time.Duration
+ maxWaitTime time.Duration
+ retryConditions []RetryConditionFunc
+ retryHooks []OnRetryFunc
+ }
+)
+
+// Retries sets the max number of retries
+func Retries(value int) Option {
+ return func(o *Options) {
+ o.maxRetries = value
+ }
+}
+
+// WaitTime sets the default wait time to sleep between requests
+func WaitTime(value time.Duration) Option {
+ return func(o *Options) {
+ o.waitTime = value
+ }
+}
+
+// MaxWaitTime sets the max wait time to sleep between requests
+func MaxWaitTime(value time.Duration) Option {
+ return func(o *Options) {
+ o.maxWaitTime = value
+ }
+}
+
+// RetryConditions sets the conditions that will be checked for retry.
+func RetryConditions(conditions []RetryConditionFunc) Option {
+ return func(o *Options) {
+ o.retryConditions = conditions
+ }
+}
+
+// RetryHooks sets the hooks that will be executed after each retry
+func RetryHooks(hooks []OnRetryFunc) Option {
+ return func(o *Options) {
+ o.retryHooks = hooks
+ }
+}
+
+// Backoff retries the operation with an increasing wait duration up to a maximum number of retries
+// (default is 3 attempts; override with the Retries(n) option).
+func Backoff(operation func() (*Response, error), options ...Option) error {
+ // Defaults
+ opts := Options{
+ maxRetries: defaultMaxRetries,
+ waitTime: defaultWaitTime,
+ maxWaitTime: defaultMaxWaitTime,
+ retryConditions: []RetryConditionFunc{},
+ }
+
+ for _, o := range options {
+ o(&opts)
+ }
+
+ var (
+ resp *Response
+ err error
+ )
+
+ for attempt := 0; attempt <= opts.maxRetries; attempt++ {
+ resp, err = operation()
+ ctx := context.Background()
+ if resp != nil && resp.Request.ctx != nil {
+ ctx = resp.Request.ctx
+ }
+ if ctx.Err() != nil {
+ return err
+ }
+
+ err1 := unwrapNoRetryErr(err) // the raw error, used when returning to user callbacks
+ needsRetry := err != nil && err == err1 // retry on a few operation errors by default
+
+ for _, condition := range opts.retryConditions {
+ needsRetry = condition(resp, err1)
+ if needsRetry {
+ break
+ }
+ }
+
+ if !needsRetry {
+ return err
+ }
+
+ for _, hook := range opts.retryHooks {
+ hook(resp, err)
+ }
+
+ // Don't need to wait when no retries left.
+ // Still run retry hooks even on last retry to keep compatibility.
+ if attempt == opts.maxRetries {
+ return err
+ }
+
+ waitTime, err2 := sleepDuration(resp, opts.waitTime, opts.maxWaitTime, attempt)
+ if err2 != nil {
+ if err == nil {
+ err = err2
+ }
+ return err
+ }
+
+ select {
+ case <-time.After(waitTime):
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+
+ return err
+}
+
+func sleepDuration(resp *Response, min, max time.Duration, attempt int) (time.Duration, error) {
+ const maxInt = 1<<31 - 1 // max int for arch 386
+ if max < 0 {
+ max = maxInt
+ }
+ if resp == nil {
+ return jitterBackoff(min, max, attempt), nil
+ }
+
+ retryAfterFunc := resp.Request.client.RetryAfter
+
+ // Check for custom callback
+ if retryAfterFunc == nil {
+ return jitterBackoff(min, max, attempt), nil
+ }
+
+ result, err := retryAfterFunc(resp.Request.client, resp)
+ if err != nil {
+ return 0, err // i.e. 'API quota exceeded'
+ }
+ if result == 0 {
+ return jitterBackoff(min, max, attempt), nil
+ }
+ if result < 0 || max < result {
+ result = max
+ }
+ if result < min {
+ result = min
+ }
+ return result, nil
+}
+
+// Return capped exponential backoff with jitter
+// http://www.awsarchitectureblog.com/2015/03/backoff.html
+func jitterBackoff(min, max time.Duration, attempt int) time.Duration {
+ base := float64(min)
+ capLevel := float64(max)
+
+ temp := math.Min(capLevel, base*math.Exp2(float64(attempt)))
+ ri := time.Duration(temp / 2)
+ result := randDuration(ri)
+
+ if result < min {
+ result = min
+ }
+
+ return result
+}
+
+var rnd = newRnd()
+var rndMu sync.Mutex
+
+func randDuration(center time.Duration) time.Duration {
+ rndMu.Lock()
+ defer rndMu.Unlock()
+
+ var ri = int64(center)
+ var jitter = rnd.Int63n(ri)
+ return time.Duration(math.Abs(float64(ri + jitter)))
+}
+
+func newRnd() *rand.Rand {
+ var seed = time.Now().UnixNano()
+ var src = rand.NewSource(seed)
+ return rand.New(src)
+}
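
In normal use the Backoff options above are not called directly; Request.Execute assembles them from the client's retry settings. A minimal sketch of configuring those settings (the counts, durations, and endpoint are illustrative assumptions):

package main

import (
    "time"

    "github.com/go-resty/resty/v2"
)

func main() {
    client := resty.New().
        SetRetryCount(3).                         // becomes Retries(...)
        SetRetryWaitTime(200 * time.Millisecond). // becomes WaitTime(...)
        SetRetryMaxWaitTime(2 * time.Second).     // becomes MaxWaitTime(...)
        AddRetryCondition(func(r *resty.Response, err error) bool {
            // Retry on transport errors or 5xx responses.
            return err != nil || (r != nil && r.StatusCode() >= 500)
        })

    _, _ = client.R().Get("https://httpbin.org/status/500") // illustrative endpoint
}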
diff --git a/vendor/github.com/go-resty/resty/v2/trace.go b/vendor/github.com/go-resty/resty/v2/trace.go
new file mode 100644
index 00000000..23cf7033
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/trace.go
@@ -0,0 +1,130 @@
+// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "context"
+ "crypto/tls"
+ "net"
+ "net/http/httptrace"
+ "time"
+)
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// TraceInfo struct
+//_______________________________________________________________________
+
+// TraceInfo struct is used to provide request trace info such as DNS lookup
+// duration, connection obtain duration, server processing duration, etc.
+//
+// Since v2.0.0
+type TraceInfo struct {
+ // DNSLookup is the duration the transport took to perform
+ // the DNS lookup.
+ DNSLookup time.Duration
+
+ // ConnTime is the duration it took to obtain a successful connection.
+ ConnTime time.Duration
+
+ // TCPConnTime is the duration it took to obtain the TCP connection.
+ TCPConnTime time.Duration
+
+ // TLSHandshake is the duration of the TLS handshake.
+ TLSHandshake time.Duration
+
+ // ServerTime is the duration the server took to respond with the first byte.
+ ServerTime time.Duration
+
+ // ResponseTime is a duration since first response byte from server to
+ // request completion.
+ ResponseTime time.Duration
+
+ // TotalTime is the total end-to-end duration of the request.
+ TotalTime time.Duration
+
+ // IsConnReused is whether this connection has been previously
+ // used for another HTTP request.
+ IsConnReused bool
+
+ // IsConnWasIdle is whether this connection was obtained from an
+ // idle pool.
+ IsConnWasIdle bool
+
+ // ConnIdleTime is how long the connection was previously
+ // idle, if IsConnWasIdle is true.
+ ConnIdleTime time.Duration
+
+ // RequestAttempt is the request attempt number within the Resty
+ // request execution flow, including retries.
+ RequestAttempt int
+
+ // RemoteAddr returns the remote network address.
+ RemoteAddr net.Addr
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// ClientTrace struct and its methods
+//_______________________________________________________________________
+
+// clientTrace struct maps the `httptrace.ClientTrace` hooks into fields
+// with the same naming for easy understanding, plus additional insights
+// into the request.
+type clientTrace struct {
+ getConn time.Time
+ dnsStart time.Time
+ dnsDone time.Time
+ connectDone time.Time
+ tlsHandshakeStart time.Time
+ tlsHandshakeDone time.Time
+ gotConn time.Time
+ gotFirstResponseByte time.Time
+ endTime time.Time
+ gotConnInfo httptrace.GotConnInfo
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Trace unexported methods
+//_______________________________________________________________________
+
+func (t *clientTrace) createContext(ctx context.Context) context.Context {
+ return httptrace.WithClientTrace(
+ ctx,
+ &httptrace.ClientTrace{
+ DNSStart: func(_ httptrace.DNSStartInfo) {
+ t.dnsStart = time.Now()
+ },
+ DNSDone: func(_ httptrace.DNSDoneInfo) {
+ t.dnsDone = time.Now()
+ },
+ ConnectStart: func(_, _ string) {
+ if t.dnsDone.IsZero() {
+ t.dnsDone = time.Now()
+ }
+ if t.dnsStart.IsZero() {
+ t.dnsStart = t.dnsDone
+ }
+ },
+ ConnectDone: func(net, addr string, err error) {
+ t.connectDone = time.Now()
+ },
+ GetConn: func(_ string) {
+ t.getConn = time.Now()
+ },
+ GotConn: func(ci httptrace.GotConnInfo) {
+ t.gotConn = time.Now()
+ t.gotConnInfo = ci
+ },
+ GotFirstResponseByte: func() {
+ t.gotFirstResponseByte = time.Now()
+ },
+ TLSHandshakeStart: func() {
+ t.tlsHandshakeStart = time.Now()
+ },
+ TLSHandshakeDone: func(_ tls.ConnectionState, _ error) {
+ t.tlsHandshakeDone = time.Now()
+ },
+ },
+ )
+}
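
The clientTrace hooks above populate the timestamps that TraceInfo is computed from. A small sketch that prints a few of the resulting fields for a traced request; the endpoint URL is an illustrative assumption:

package main

import (
    "fmt"

    "github.com/go-resty/resty/v2"
)

func main() {
    resp, err := resty.New().R().EnableTrace().Get("https://httpbin.org/get") // illustrative endpoint
    if err != nil {
        fmt.Println("request failed:", err)
        return
    }

    ti := resp.Request.TraceInfo()
    fmt.Println("DNSLookup:   ", ti.DNSLookup)
    fmt.Println("TLSHandshake:", ti.TLSHandshake)
    fmt.Println("ServerTime:  ", ti.ServerTime)
    fmt.Println("TotalTime:   ", ti.TotalTime)
    fmt.Println("Conn reused: ", ti.IsConnReused)
}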
diff --git a/vendor/github.com/go-resty/resty/v2/transport.go b/vendor/github.com/go-resty/resty/v2/transport.go
new file mode 100644
index 00000000..e15b48c5
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/transport.go
@@ -0,0 +1,35 @@
+// +build go1.13
+
+// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "net"
+ "net/http"
+ "runtime"
+ "time"
+)
+
+func createTransport(localAddr net.Addr) *http.Transport {
+ dialer := &net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ DualStack: true,
+ }
+ if localAddr != nil {
+ dialer.LocalAddr = localAddr
+ }
+ return &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: dialer.DialContext,
+ ForceAttemptHTTP2: true,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1,
+ }
+}
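
createTransport is unexported; callers reach it indirectly, for example through NewWithLocalAddr when they need to dial from a specific local address. A hedged sketch, where the local IP (a TEST-NET address) and the endpoint are illustrative assumptions:

package main

import (
    "net"

    "github.com/go-resty/resty/v2"
)

func main() {
    // Dial from a specific local interface address; replace with one that exists on your host.
    localAddr := &net.TCPAddr{IP: net.ParseIP("192.0.2.10")} // illustrative address
    client := resty.NewWithLocalAddr(localAddr)

    _, _ = client.R().Get("https://httpbin.org/get") // illustrative endpoint
}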
diff --git a/vendor/github.com/go-resty/resty/v2/transport112.go b/vendor/github.com/go-resty/resty/v2/transport112.go
new file mode 100644
index 00000000..fbbbc591
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/transport112.go
@@ -0,0 +1,34 @@
+// +build !go1.13
+
+// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "net"
+ "net/http"
+ "runtime"
+ "time"
+)
+
+func createTransport(localAddr net.Addr) *http.Transport {
+ dialer := &net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ DualStack: true,
+ }
+ if localAddr != nil {
+ dialer.LocalAddr = localAddr
+ }
+ return &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: dialer.DialContext,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1,
+ }
+}
diff --git a/vendor/github.com/go-resty/resty/v2/util.go b/vendor/github.com/go-resty/resty/v2/util.go
new file mode 100644
index 00000000..1d563bef
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/util.go
@@ -0,0 +1,391 @@
+// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "log"
+ "mime/multipart"
+ "net/http"
+ "net/textproto"
+ "os"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "sort"
+ "strings"
+ "sync"
+)
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Logger interface
+//_______________________________________________________________________
+
+// Logger interface abstracts logging away from Resty, giving Resty users
+// the choice of logger.
+type Logger interface {
+ Errorf(format string, v ...interface{})
+ Warnf(format string, v ...interface{})
+ Debugf(format string, v ...interface{})
+}
+
+func createLogger() *logger {
+ l := &logger{l: log.New(os.Stderr, "", log.Ldate|log.Lmicroseconds)}
+ return l
+}
+
+var _ Logger = (*logger)(nil)
+
+type logger struct {
+ l *log.Logger
+}
+
+func (l *logger) Errorf(format string, v ...interface{}) {
+ l.output("ERROR RESTY "+format, v...)
+}
+
+func (l *logger) Warnf(format string, v ...interface{}) {
+ l.output("WARN RESTY "+format, v...)
+}
+
+func (l *logger) Debugf(format string, v ...interface{}) {
+ l.output("DEBUG RESTY "+format, v...)
+}
+
+func (l *logger) output(format string, v ...interface{}) {
+ if len(v) == 0 {
+ l.l.Print(format)
+ return
+ }
+ l.l.Printf(format, v...)
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Package Helper methods
+//_______________________________________________________________________
+
+// IsStringEmpty method tells whether the given string is empty or not.
+func IsStringEmpty(str string) bool {
+ return len(strings.TrimSpace(str)) == 0
+}
+
+// DetectContentType method is used to figure out `Request.Body` content type for request header
+func DetectContentType(body interface{}) string {
+ contentType := plainTextType
+ kind := kindOf(body)
+ switch kind {
+ case reflect.Struct, reflect.Map:
+ contentType = jsonContentType
+ case reflect.String:
+ contentType = plainTextType
+ default:
+ if b, ok := body.([]byte); ok {
+ contentType = http.DetectContentType(b)
+ } else if kind == reflect.Slice {
+ contentType = jsonContentType
+ }
+ }
+
+ return contentType
+}
+
+// IsJSONType method reports whether the given content type is JSON.
+func IsJSONType(ct string) bool {
+ return jsonCheck.MatchString(ct)
+}
+
+// IsXMLType method reports whether the given content type is XML.
+func IsXMLType(ct string) bool {
+ return xmlCheck.MatchString(ct)
+}
+
+// Unmarshalc content into object from JSON or XML
+func Unmarshalc(c *Client, ct string, b []byte, d interface{}) (err error) {
+ if IsJSONType(ct) {
+ err = c.JSONUnmarshal(b, d)
+ } else if IsXMLType(ct) {
+ err = c.XMLUnmarshal(b, d)
+ }
+
+ return
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// RequestLog and ResponseLog type
+//_______________________________________________________________________
+
+// RequestLog struct is used to collect information from the resty request
+// instance for debug logging. It is sent to the request log callback before resty
+// actually logs the information.
+type RequestLog struct {
+ Header http.Header
+ Body string
+}
+
+// ResponseLog struct is used to collect information from the resty response
+// instance for debug logging. It is sent to the response log callback before resty
+// actually logs the information.
+type ResponseLog struct {
+ Header http.Header
+ Body string
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Package Unexported methods
+//_______________________________________________________________________
+
+// jsonMarshal provides an opt-in way to disable HTML escaping during JSON marshaling.
+func jsonMarshal(c *Client, r *Request, d interface{}) (*bytes.Buffer, error) {
+ if !r.jsonEscapeHTML || !c.jsonEscapeHTML {
+ return noescapeJSONMarshal(d)
+ }
+
+ data, err := c.JSONMarshal(d)
+ if err != nil {
+ return nil, err
+ }
+
+ buf := acquireBuffer()
+ _, _ = buf.Write(data)
+ return buf, nil
+}
+
+func firstNonEmpty(v ...string) string {
+ for _, s := range v {
+ if !IsStringEmpty(s) {
+ return s
+ }
+ }
+ return ""
+}
+
+var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"")
+
+func escapeQuotes(s string) string {
+ return quoteEscaper.Replace(s)
+}
+
+func createMultipartHeader(param, fileName, contentType string) textproto.MIMEHeader {
+ hdr := make(textproto.MIMEHeader)
+
+ var contentDispositionValue string
+ if IsStringEmpty(fileName) {
+ contentDispositionValue = fmt.Sprintf(`form-data; name="%s"`, param)
+ } else {
+ contentDispositionValue = fmt.Sprintf(`form-data; name="%s"; filename="%s"`,
+ param, escapeQuotes(fileName))
+ }
+ hdr.Set("Content-Disposition", contentDispositionValue)
+
+ if !IsStringEmpty(contentType) {
+ hdr.Set(hdrContentTypeKey, contentType)
+ }
+ return hdr
+}
+
+func addMultipartFormField(w *multipart.Writer, mf *MultipartField) error {
+ partWriter, err := w.CreatePart(createMultipartHeader(mf.Param, mf.FileName, mf.ContentType))
+ if err != nil {
+ return err
+ }
+
+ _, err = io.Copy(partWriter, mf.Reader)
+ return err
+}
+
+func writeMultipartFormFile(w *multipart.Writer, fieldName, fileName string, r io.Reader) error {
+ // Auto detect actual multipart content type
+ cbuf := make([]byte, 512)
+ size, err := r.Read(cbuf)
+ if err != nil && err != io.EOF {
+ return err
+ }
+
+ partWriter, err := w.CreatePart(createMultipartHeader(fieldName, fileName, http.DetectContentType(cbuf)))
+ if err != nil {
+ return err
+ }
+
+ if _, err = partWriter.Write(cbuf[:size]); err != nil {
+ return err
+ }
+
+ _, err = io.Copy(partWriter, r)
+ return err
+}
+
+func addFile(w *multipart.Writer, fieldName, path string) error {
+ file, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ defer closeq(file)
+ return writeMultipartFormFile(w, fieldName, filepath.Base(path), file)
+}
+
+func addFileReader(w *multipart.Writer, f *File) error {
+ return writeMultipartFormFile(w, f.ParamName, f.Name, f.Reader)
+}
+
+func getPointer(v interface{}) interface{} {
+ vv := valueOf(v)
+ if vv.Kind() == reflect.Ptr {
+ return v
+ }
+ return reflect.New(vv.Type()).Interface()
+}
+
+func isPayloadSupported(m string, allowMethodGet bool) bool {
+ return !(m == MethodHead || m == MethodOptions || (m == MethodGet && !allowMethodGet))
+}
+
+func typeOf(i interface{}) reflect.Type {
+ return indirect(valueOf(i)).Type()
+}
+
+func valueOf(i interface{}) reflect.Value {
+ return reflect.ValueOf(i)
+}
+
+func indirect(v reflect.Value) reflect.Value {
+ return reflect.Indirect(v)
+}
+
+func kindOf(v interface{}) reflect.Kind {
+ return typeOf(v).Kind()
+}
+
+func createDirectory(dir string) (err error) {
+ if _, err = os.Stat(dir); err != nil {
+ if os.IsNotExist(err) {
+ if err = os.MkdirAll(dir, 0755); err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+func canJSONMarshal(contentType string, kind reflect.Kind) bool {
+ return IsJSONType(contentType) && (kind == reflect.Struct || kind == reflect.Map || kind == reflect.Slice)
+}
+
+func functionName(i interface{}) string {
+ return runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name()
+}
+
+func acquireBuffer() *bytes.Buffer {
+ return bufPool.Get().(*bytes.Buffer)
+}
+
+func releaseBuffer(buf *bytes.Buffer) {
+ if buf != nil {
+ buf.Reset()
+ bufPool.Put(buf)
+ }
+}
+
+// requestBodyReleaser wraps the request's body and implements a custom Close for it.
+// The Close method closes the original body and releases the request body buffer back to the sync.Pool.
+type requestBodyReleaser struct {
+ releaseOnce sync.Once
+ reqBuf *bytes.Buffer
+ io.ReadCloser
+}
+
+func newRequestBodyReleaser(respBody io.ReadCloser, reqBuf *bytes.Buffer) io.ReadCloser {
+ if reqBuf == nil {
+ return respBody
+ }
+
+ return &requestBodyReleaser{
+ reqBuf: reqBuf,
+ ReadCloser: respBody,
+ }
+}
+
+func (rr *requestBodyReleaser) Close() error {
+ err := rr.ReadCloser.Close()
+ rr.releaseOnce.Do(func() {
+ releaseBuffer(rr.reqBuf)
+ })
+
+ return err
+}
+
+func closeq(v interface{}) {
+ if c, ok := v.(io.Closer); ok {
+ silently(c.Close())
+ }
+}
+
+func silently(_ ...interface{}) {}
+
+func composeHeaders(c *Client, r *Request, hdrs http.Header) string {
+ str := make([]string, 0, len(hdrs))
+ for _, k := range sortHeaderKeys(hdrs) {
+ var v string
+ if k == "Cookie" {
+ cv := strings.TrimSpace(strings.Join(hdrs[k], ", "))
+ if c.GetClient().Jar != nil {
+ for _, c := range c.GetClient().Jar.Cookies(r.RawRequest.URL) {
+ if cv != "" {
+ cv = cv + "; " + c.String()
+ } else {
+ cv = c.String()
+ }
+ }
+ }
+ v = strings.TrimSpace(fmt.Sprintf("%25s: %s", k, cv))
+ } else {
+ v = strings.TrimSpace(fmt.Sprintf("%25s: %s", k, strings.Join(hdrs[k], ", ")))
+ }
+ if v != "" {
+ str = append(str, "\t"+v)
+ }
+ }
+ return strings.Join(str, "\n")
+}
+
+func sortHeaderKeys(hdrs http.Header) []string {
+ keys := make([]string, 0, len(hdrs))
+ for key := range hdrs {
+ keys = append(keys, key)
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+func copyHeaders(hdrs http.Header) http.Header {
+ nh := http.Header{}
+ for k, v := range hdrs {
+ nh[k] = v
+ }
+ return nh
+}
+
+type noRetryErr struct {
+ err error
+}
+
+func (e *noRetryErr) Error() string {
+ return e.err.Error()
+}
+
+func wrapNoRetryErr(err error) error {
+ if err != nil {
+ err = &noRetryErr{err: err}
+ }
+ return err
+}
+
+func unwrapNoRetryErr(err error) error {
+ if e, ok := err.(*noRetryErr); ok {
+ err = e.err
+ }
+ return err
+}
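
The Logger interface above lets callers swap in their own logging. A minimal sketch of a custom implementation wired in through Client.SetLogger; the log prefix and endpoint are illustrative assumptions:

package main

import (
    "log"
    "os"

    "github.com/go-resty/resty/v2"
)

// stdLogger satisfies resty's Logger interface using the standard library logger.
type stdLogger struct {
    l *log.Logger
}

func (s stdLogger) Errorf(format string, v ...interface{}) { s.l.Printf("ERROR "+format, v...) }
func (s stdLogger) Warnf(format string, v ...interface{})  { s.l.Printf("WARN "+format, v...) }
func (s stdLogger) Debugf(format string, v ...interface{}) { s.l.Printf("DEBUG "+format, v...) }

func main() {
    client := resty.New().
        SetLogger(stdLogger{l: log.New(os.Stderr, "[example] ", log.LstdFlags)}). // assumed prefix
        SetDebug(true) // routes request/response debug logs through the custom logger

    _, _ = client.R().Get("https://httpbin.org/get") // illustrative endpoint
}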
diff --git a/vendor/github.com/go-sql-driver/mysql/.gitignore b/vendor/github.com/go-sql-driver/mysql/.gitignore
new file mode 100644
index 00000000..2de28da1
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/.gitignore
@@ -0,0 +1,9 @@
+.DS_Store
+.DS_Store?
+._*
+.Spotlight-V100
+.Trashes
+Icon?
+ehthumbs.db
+Thumbs.db
+.idea
diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS
new file mode 100644
index 00000000..50afa2c8
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/AUTHORS
@@ -0,0 +1,117 @@
+# This is the official list of Go-MySQL-Driver authors for copyright purposes.
+
+# If you are submitting a patch, please add your name or the name of the
+# organization which holds the copyright to this list in alphabetical order.
+
+# Names should be added to this file as
+# Name
+# The email address is not required for organizations.
+# Please keep the list sorted.
+
+
+# Individual Persons
+
+Aaron Hopkins
+Achille Roussel
+Alex Snast
+Alexey Palazhchenko
+Andrew Reid
+Animesh Ray
+Arne Hormann
+Ariel Mashraki
+Asta Xie
+Bulat Gaifullin
+Caine Jette
+Carlos Nieto
+Chris Moos
+Craig Wilson
+Daniel Montoya
+Daniel Nichter
+Daniël van Eeden
+Dave Protasowski
+DisposaBoy
+Egor Smolyakov
+Erwan Martin
+Evan Shaw
+Frederick Mayle
+Gustavo Kristic
+Hajime Nakagami
+Hanno Braun
+Henri Yandell
+Hirotaka Yamamoto
+Huyiguang
+ICHINOSE Shogo
+Ilia Cimpoes
+INADA Naoki
+Jacek Szwec
+James Harr
+Jeff Hodges
+Jeffrey Charles
+Jerome Meyer
+Jiajia Zhong
+Jian Zhen
+Joshua Prunier
+Julien Lefevre
+Julien Schmidt
+Justin Li
+Justin Nuß
+Kamil Dziedzic
+Kei Kamikawa
+Kevin Malachowski
+Kieron Woodhouse
+Lennart Rudolph
+Leonardo YongUk Kim
+Linh Tran Tuan
+Lion Yang
+Luca Looz
+Lucas Liu
+Luke Scott
+Maciej Zimnoch
+Michael Woolnough
+Nathanial Murphy
+Nicola Peduzzi
+Olivier Mengué
+oscarzhao
+Paul Bonser
+Peter Schultz
+Rebecca Chin
+Reed Allman
+Richard Wilkes
+Robert Russell
+Runrioter Wung
+Sho Iizuka
+Sho Ikeda
+Shuode Li
+Simon J Mudd
+Soroush Pour
+Stan Putrya
+Stanley Gunawan
+Steven Hartland
+Tan Jinhua <312841925 at qq.com>
+Thomas Wodarek
+Tim Ruffles
+Tom Jenkinson
+Vladimir Kovpak
+Vladyslav Zhelezniak
+Xiangyu Hu
+Xiaobing Jiang
+Xiuming Chen
+Xuehong Chan
+Zhenye Xie
+Zhixin Wen
+
+# Organizations
+
+Barracuda Networks, Inc.
+Counting Ltd.
+DigitalOcean Inc.
+Facebook Inc.
+GitHub Inc.
+Google Inc.
+InfoSum Ltd.
+Keybase Inc.
+Multiplay Ltd.
+Percona LLC
+Pivotal Inc.
+Stripe Inc.
+Zendesk Inc.
diff --git a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
new file mode 100644
index 00000000..72a738ed
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
@@ -0,0 +1,232 @@
+## Version 1.6 (2021-04-01)
+
+Changes:
+
+ - Migrate the CI service from travis-ci to GitHub Actions (#1176, #1183, #1190)
+ - `NullTime` is deprecated (#960, #1144)
+ - Reduce allocations when building SET command (#1111)
+ - Performance improvement for time formatting (#1118)
+ - Performance improvement for time parsing (#1098, #1113)
+
+New Features:
+
+ - Implement `driver.Validator` interface (#1106, #1174)
+ - Support returning `uint64` from `Valuer` in `ConvertValue` (#1143)
+ - Add `json.RawMessage` for converter and prepared statement (#1059)
+ - Interpolate `json.RawMessage` as `string` (#1058)
+ - Implements `CheckNamedValue` (#1090)
+
+Bugfixes:
+
+ - Stop rounding times (#1121, #1172)
+ - Put zero filler into the SSL handshake packet (#1066)
+ - Fix checking cancelled connections back into the connection pool (#1095)
+ - Fix remove last 0 byte for mysql_old_password when password is empty (#1133)
+
+
+## Version 1.5 (2020-01-07)
+
+Changes:
+
+ - Dropped support Go 1.9 and lower (#823, #829, #886, #1016, #1017)
+ - Improve buffer handling (#890)
+ - Document potentially insecure TLS configs (#901)
+ - Use a double-buffering scheme to prevent data races (#943)
+ - Pass uint64 values without converting them to string (#838, #955)
+ - Update collations and make utf8mb4 default (#877, #1054)
+ - Make NullTime compatible with sql.NullTime in Go 1.13+ (#995)
+ - Removed CloudSQL support (#993, #1007)
+ - Add Go Module support (#1003)
+
+New Features:
+
+ - Implement support of optional TLS (#900)
+ - Check connection liveness (#934, #964, #997, #1048, #1051, #1052)
+ - Implement Connector Interface (#941, #958, #1020, #1035)
+
+Bugfixes:
+
+ - Mark connections as bad on error during ping (#875)
+ - Mark connections as bad on error during dial (#867)
+ - Fix connection leak caused by rapid context cancellation (#1024)
+ - Mark connections as bad on error during Conn.Prepare (#1030)
+
+
+## Version 1.4.1 (2018-11-14)
+
+Bugfixes:
+
+ - Fix TIME format for binary columns (#818)
+ - Fix handling of empty auth plugin names (#835)
+ - Fix caching_sha2_password with empty password (#826)
+ - Fix canceled context broke mysqlConn (#862)
+ - Fix OldAuthSwitchRequest support (#870)
+ - Fix Auth Response packet for cleartext password (#887)
+
+## Version 1.4 (2018-06-03)
+
+Changes:
+
+ - Documentation fixes (#530, #535, #567)
+ - Refactoring (#575, #579, #580, #581, #603, #615, #704)
+ - Cache column names (#444)
+ - Sort the DSN parameters in DSNs generated from a config (#637)
+ - Allow native password authentication by default (#644)
+ - Use the default port if it is missing in the DSN (#668)
+ - Removed the `strict` mode (#676)
+ - Do not query `max_allowed_packet` by default (#680)
+ - Dropped support Go 1.6 and lower (#696)
+ - Updated `ConvertValue()` to match the database/sql/driver implementation (#760)
+ - Document the usage of `0000-00-00T00:00:00` as the time.Time zero value (#783)
+ - Improved the compatibility of the authentication system (#807)
+
+New Features:
+
+ - Multi-Results support (#537)
+ - `rejectReadOnly` DSN option (#604)
+ - `context.Context` support (#608, #612, #627, #761)
+ - Transaction isolation level support (#619, #744)
+ - Read-Only transactions support (#618, #634)
+ - `NewConfig` function which initializes a config with default values (#679)
+ - Implemented the `ColumnType` interfaces (#667, #724)
+ - Support for custom string types in `ConvertValue` (#623)
+ - Implemented `NamedValueChecker`, improving support for uint64 with high bit set (#690, #709, #710)
+ - `caching_sha2_password` authentication plugin support (#794, #800, #801, #802)
+ - Implemented `driver.SessionResetter` (#779)
+ - `sha256_password` authentication plugin support (#808)
+
+Bugfixes:
+
+ - Use the DSN hostname as TLS default ServerName if `tls=true` (#564, #718)
+ - Fixed LOAD LOCAL DATA INFILE for empty files (#590)
+ - Removed columns definition cache since it sometimes cached invalid data (#592)
+ - Don't mutate registered TLS configs (#600)
+ - Make RegisterTLSConfig concurrency-safe (#613)
+ - Handle missing auth data in the handshake packet correctly (#646)
+ - Do not retry queries when data was written to avoid data corruption (#302, #736)
+ - Cache the connection pointer for error handling before invalidating it (#678)
+ - Fixed imports for appengine/cloudsql (#700)
+ - Fix sending STMT_LONG_DATA for 0 byte data (#734)
+ - Set correct capacity for []bytes read from length-encoded strings (#766)
+ - Make RegisterDial concurrency-safe (#773)
+
+
+## Version 1.3 (2016-12-01)
+
+Changes:
+
+ - Go 1.1 is no longer supported
+ - Use decimals fields in MySQL to format time types (#249)
+ - Buffer optimizations (#269)
+ - TLS ServerName defaults to the host (#283)
+ - Refactoring (#400, #410, #437)
+ - Adjusted documentation for second generation CloudSQL (#485)
+ - Documented DSN system var quoting rules (#502)
+ - Made statement.Close() calls idempotent to avoid errors in Go 1.6+ (#512)
+
+New Features:
+
+ - Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249)
+ - Support for returning table alias on Columns() (#289, #359, #382)
+ - Placeholder interpolation, can be actived with the DSN parameter `interpolateParams=true` (#309, #318, #490)
+ - Support for uint64 parameters with high bit set (#332, #345)
+ - Cleartext authentication plugin support (#327)
+ - Exported ParseDSN function and the Config struct (#403, #419, #429)
+ - Read / Write timeouts (#401)
+ - Support for JSON field type (#414)
+ - Support for multi-statements and multi-results (#411, #431)
+ - DSN parameter to set the driver-side max_allowed_packet value manually (#489)
+ - Native password authentication plugin support (#494, #524)
+
+Bugfixes:
+
+ - Fixed handling of queries without columns and rows (#255)
+ - Fixed a panic when SetKeepAlive() failed (#298)
+ - Handle ERR packets while reading rows (#321)
+ - Fixed reading NULL length-encoded integers in MySQL 5.6+ (#349)
+ - Fixed absolute paths support in LOAD LOCAL DATA INFILE (#356)
+ - Actually zero out bytes in handshake response (#378)
+ - Fixed race condition in registering LOAD DATA INFILE handler (#383)
+ - Fixed tests with MySQL 5.7.9+ (#380)
+ - QueryUnescape TLS config names (#397)
+ - Fixed "broken pipe" error by writing to closed socket (#390)
+ - Fixed LOAD LOCAL DATA INFILE buffering (#424)
+ - Fixed parsing of floats into float64 when placeholders are used (#434)
+ - Fixed DSN tests with Go 1.7+ (#459)
+ - Handle ERR packets while waiting for EOF (#473)
+ - Invalidate connection on error while discarding additional results (#513)
+ - Allow terminating packets of length 0 (#516)
+
+
+## Version 1.2 (2014-06-03)
+
+Changes:
+
+ - We switched back to a "rolling release". `go get` installs the current master branch again
+ - Version v1 of the driver will not be maintained anymore. Go 1.0 is no longer supported by this driver
+ - Exported errors to allow easy checking from application code
+ - Enabled TCP Keepalives on TCP connections
+ - Optimized INFILE handling (better buffer size calculation, lazy init, ...)
+ - The DSN parser also checks for a missing separating slash
+ - Faster binary date / datetime to string formatting
+ - Also exported the MySQLWarning type
+ - mysqlConn.Close returns the first error encountered instead of ignoring all errors
+ - writePacket() automatically writes the packet size to the header
+ - readPacket() uses an iterative approach instead of the recursive approach to merge splitted packets
+
+New Features:
+
+ - `RegisterDial` allows the usage of a custom dial function to establish the network connection
+ - Setting the connection collation is possible with the `collation` DSN parameter. This parameter should be preferred over the `charset` parameter
+ - Logging of critical errors is configurable with `SetLogger`
+ - Google CloudSQL support
+
+Bugfixes:
+
+ - Allow more than 32 parameters in prepared statements
+ - Various old_password fixes
+ - Fixed TestConcurrent test to pass Go's race detection
+ - Fixed appendLengthEncodedInteger for large numbers
+ - Renamed readLengthEnodedString to readLengthEncodedString and skipLengthEnodedString to skipLengthEncodedString (fixed typo)
+
+
+## Version 1.1 (2013-11-02)
+
+Changes:
+
+ - Go-MySQL-Driver now requires Go 1.1
+ - Connections now use the collation `utf8_general_ci` by default. Adding `&charset=UTF8` to the DSN should not be necessary anymore
+ - Made closing rows and connections error tolerant. This allows for example deferring rows.Close() without checking for errors
+ - `[]byte(nil)` is now treated as a NULL value. Before, it was treated like an empty string / `[]byte("")`
+ - DSN parameter values must now be url.QueryEscape'ed. This allows text values to contain special characters, such as '&'.
+ - Use the IO buffer also for writing. This results in zero allocations (by the driver) for most queries
+ - Optimized the buffer for reading
+ - stmt.Query now caches column metadata
+ - New Logo
+ - Changed the copyright header to include all contributors
+ - Improved the LOAD INFILE documentation
+ - The driver struct is now exported to make the driver directly accessible
+ - Refactored the driver tests
+ - Added more benchmarks and moved all to a separate file
+ - Other small refactoring
+
+New Features:
+
+ - Added *old_passwords* support: Required in some cases, but must be enabled by adding `allowOldPasswords=true` to the DSN since it is insecure
+ - Added a `clientFoundRows` parameter: Return the number of matching rows instead of the number of rows changed on UPDATEs
+ - Added TLS/SSL support: Use a TLS/SSL encrypted connection to the server. Custom TLS configs can be registered and used
+
+Bugfixes:
+
+ - Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification
+ - Convert to DB timezone when inserting `time.Time`
+ - Splitted packets (more than 16MB) are now merged correctly
+ - Fixed false positive `io.EOF` errors when the data was fully read
+ - Avoid panics on reuse of closed connections
+ - Fixed empty string producing false nil values
+ - Fixed sign byte for positive TIME fields
+
+
+## Version 1.0 (2013-05-14)
+
+Initial Release
diff --git a/vendor/github.com/go-sql-driver/mysql/LICENSE b/vendor/github.com/go-sql-driver/mysql/LICENSE
new file mode 100644
index 00000000..14e2f777
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/LICENSE
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md
new file mode 100644
index 00000000..0b13154f
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/README.md
@@ -0,0 +1,520 @@
+# Go-MySQL-Driver
+
+A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) package
+
+---------------------------------------
+ * [Features](#features)
+ * [Requirements](#requirements)
+ * [Installation](#installation)
+ * [Usage](#usage)
+ * [DSN (Data Source Name)](#dsn-data-source-name)
+ * [Password](#password)
+ * [Protocol](#protocol)
+ * [Address](#address)
+ * [Parameters](#parameters)
+ * [Examples](#examples)
+ * [Connection pool and timeouts](#connection-pool-and-timeouts)
+ * [context.Context Support](#contextcontext-support)
+ * [ColumnType Support](#columntype-support)
+ * [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support)
+ * [time.Time support](#timetime-support)
+ * [Unicode support](#unicode-support)
+ * [Testing / Development](#testing--development)
+ * [License](#license)
+
+---------------------------------------
+
+## Features
+ * Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance")
+ * Native Go implementation. No C-bindings, just pure Go
+ * Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](https://godoc.org/github.com/go-sql-driver/mysql#DialFunc)
+ * Automatic handling of broken connections
+ * Automatic Connection Pooling *(by database/sql package)*
+ * Supports queries larger than 16MB
+ * Full [`sql.RawBytes`](https://golang.org/pkg/database/sql/#RawBytes) support.
+ * Intelligent `LONG DATA` handling in prepared statements
+ * Secure `LOAD DATA LOCAL INFILE` support with file allowlisting and `io.Reader` support
+ * Optional `time.Time` parsing
+ * Optional placeholder interpolation
+
+## Requirements
+ * Go 1.10 or higher. We aim to support the 3 latest versions of Go.
+ * MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+)
+
+---------------------------------------
+
+## Installation
+Simply install the package to your [$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from your shell:
+```bash
+$ go get -u github.com/go-sql-driver/mysql
+```
+Make sure [Git is installed](https://git-scm.com/downloads) on your machine and in your system's `PATH`.
+
+## Usage
+_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver; you can then use the full [`database/sql`](https://golang.org/pkg/database/sql/) API.
+
+Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`:
+
+```go
+import (
+ "database/sql"
+ "time"
+
+ _ "github.com/go-sql-driver/mysql"
+)
+
+// ...
+
+db, err := sql.Open("mysql", "user:password@/dbname")
+if err != nil {
+ panic(err)
+}
+// See "Important settings" section.
+db.SetConnMaxLifetime(time.Minute * 3)
+db.SetMaxOpenConns(10)
+db.SetMaxIdleConns(10)
+```
+
+[Examples are available in our Wiki](https://github.com/go-sql-driver/mysql/wiki/Examples "Go-MySQL-Driver Examples").
+
+### Important settings
+
+`db.SetConnMaxLifetime()` is required to ensure connections are closed by the driver safely before they are closed by the MySQL server, the OS, or other middleware. Since some middleware closes idle connections after 5 minutes, we recommend a timeout shorter than 5 minutes. This setting also helps with load balancing and with changing system variables.
+
+`db.SetMaxOpenConns()` is highly recommended to limit the number of connections used by the application. There is no recommended number, because it depends on the application and the MySQL server.
+
+`db.SetMaxIdleConns()` is recommended to be set the same as (or greater than) `db.SetMaxOpenConns()`. When it is smaller than `SetMaxOpenConns()`, connections can be opened and closed much more frequently than you expect. Idle connections can be closed by `db.SetConnMaxLifetime()`. If you want to close idle connections more quickly, you can use `db.SetConnMaxIdleTime()` (available since Go 1.15), as sketched below.
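+
+A minimal sketch, continuing from the `db` opened in the Usage example above (the one-minute value is an arbitrary placeholder):
+
+```go
+// Requires Go 1.15+; complements the pool settings shown above.
+db.SetConnMaxIdleTime(time.Minute)
+```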
+
+
+### DSN (Data Source Name)
+
+The Data Source Name has a common format, like e.g. [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php) uses it, but without a type prefix (optional parts are marked by square brackets):
+```
+[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN]
+```
+
+A DSN in its fullest form:
+```
+username:password@protocol(address)/dbname?param=value
+```
+
+Except for the database name, all values are optional. So the minimal DSN is:
+```
+/dbname
+```
+
+If you do not want to preselect a database, leave `dbname` empty:
+```
+/
+```
+This has the same effect as an empty DSN string:
+```
+
+```
+
+Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mysql#Config.FormatDSN) can be used to create a DSN string by filling a struct.
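+
+A minimal sketch of building the DSN programmatically rather than by string concatenation; the credentials, address, and database name below are placeholders:
+
+```go
+import (
+	"database/sql"
+	"log"
+
+	"github.com/go-sql-driver/mysql"
+)
+
+// ...
+
+cfg := mysql.NewConfig() // start from the driver's defaults
+cfg.User = "user"
+cfg.Passwd = "password"
+cfg.Net = "tcp"
+cfg.Addr = "127.0.0.1:3306"
+cfg.DBName = "dbname"
+
+db, err := sql.Open("mysql", cfg.FormatDSN()) // "user:password@tcp(127.0.0.1:3306)/dbname"
+if err != nil {
+	log.Fatal(err)
+}
+defer db.Close()
+```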
+
+#### Password
+Passwords can consist of any character. Escaping is **not** necessary.
+
+#### Protocol
+See [net.Dial](https://golang.org/pkg/net/#Dial) for more information about which networks are available.
+In general you should use a Unix domain socket if available, and TCP otherwise, for best performance.
+
+#### Address
+For TCP and UDP networks, addresses have the form `host[:port]`.
+If `port` is omitted, the default port will be used.
+If `host` is a literal IPv6 address, it must be enclosed in square brackets.
+The functions [net.JoinHostPort](https://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](https://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form.
+
+For Unix domain sockets the address is the absolute path to the MySQL-Server-socket, e.g. `/var/run/mysqld/mysqld.sock` or `/tmp/mysql.sock`.
+
+#### Parameters
+*Parameters are case-sensitive!*
+
+Notice that any of `true`, `TRUE`, `True` or `1` is accepted to stand for a true boolean value. Not surprisingly, false can be specified as any of: `false`, `FALSE`, `False` or `0`.
+
+##### `allowAllFiles`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+`allowAllFiles=true` disables the file allowlist for `LOAD DATA LOCAL INFILE` and allows *all* files.
+[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)
+
+##### `allowCleartextPasswords`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+`allowCleartextPasswords=true` allows using the [cleartext client side plugin](https://dev.mysql.com/doc/en/cleartext-pluggable-authentication.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network.
+
+##### `allowNativePasswords`
+
+```
+Type: bool
+Valid Values: true, false
+Default: true
+```
+`allowNativePasswords=false` disallows the usage of the MySQL native password method.
+
+##### `allowOldPasswords`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+`allowOldPasswords=true` allows the usage of the insecure old password method. This should be avoided, but is necessary in some cases. See also [the old_passwords wiki page](https://github.com/go-sql-driver/mysql/wiki/old_passwords).
+
+##### `charset`
+
+```
+Type: string
+Valid Values: <name>
+Default: none
+```
+
+Sets the charset used for client-server interaction (`"SET NAMES <value>"`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset fails. This enables, for example, support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with a fallback to `utf8` for older servers (`charset=utf8mb4,utf8`).
+
+Usage of the `charset` parameter is discouraged because it issues additional queries to the server.
+Unless you need the fallback behavior, please use `collation` instead.
+
+##### `checkConnLiveness`
+
+```
+Type: bool
+Valid Values: true, false
+Default: true
+```
+
+On supported platforms, connections retrieved from the connection pool are checked for liveness before they are used. If the check fails, the respective connection is marked as bad and the query is retried with another connection.
+`checkConnLiveness=false` disables this liveness check of connections.
+
+##### `collation`
+
+```
+Type: string
+Valid Values: <name>
+Default: utf8mb4_general_ci
+```
+
+Sets the collation used for client-server interaction on connection. In contrast to `charset`, `collation` does not issue additional queries. If the specified collation is unavailable on the target server, the connection will fail.
+
+A list of valid collations for a server is retrievable with `SHOW COLLATION`.
+
+The default collation (`utf8mb4_general_ci`) is supported from MySQL 5.5. For older MySQL servers, you should use an older collation (e.g. `utf8_general_ci`).
+
+Collations for the charsets "ucs2", "utf16", "utf16le", and "utf32" cannot be used ([ref](https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html#charset-connection-impermissible-client-charset)).
+
+
+##### `clientFoundRows`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+`clientFoundRows=true` causes an UPDATE to return the number of matching rows instead of the number of rows changed.
+
+##### `columnsWithAlias`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+When `columnsWithAlias` is true, calls to `sql.Rows.Columns()` will return the table alias and the column name separated by a dot. For example:
+
+```
+SELECT u.id FROM users as u
+```
+
+will return `u.id` instead of just `id` if `columnsWithAlias=true`.
+
+##### `interpolateParams`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+If `interpolateParams` is true, placeholders (`?`) in calls to `db.Query()` and `db.Exec()` are interpolated into a single query string with the given parameters. This reduces the number of roundtrips, since with `interpolateParams=false` the driver has to prepare a statement, execute it with the given parameters, and close the statement again.
+
+*This cannot be used together with the multibyte encodings BIG5, CP932, GB2312, GBK or SJIS. These are rejected as they may [introduce a SQL injection vulnerability](http://stackoverflow.com/a/12118602/3430118)!*
+
+##### `loc`
+
+```
+Type: string
+Valid Values: <escaped name>
+Default: UTC
+```
+
+Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](https://golang.org/pkg/time/#LoadLocation) for details.
+
+Note that this sets the location for time.Time values but does not change MySQL's [time_zone setting](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html). For that see the [time_zone system variable](#system-variables), which can also be set as a DSN parameter.
+
+Please keep in mind that param values must be [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively, you can manually replace the `/` with `%2F`. For example, `US/Pacific` would be `loc=US%2FPacific`.
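+
+A minimal sketch of escaping the location before putting it into the DSN (credentials, address, and database name are placeholders):
+
+```go
+import (
+	"database/sql"
+	"log"
+	"net/url"
+
+	_ "github.com/go-sql-driver/mysql"
+)
+
+// ...
+
+loc := url.QueryEscape("US/Pacific") // "US%2FPacific"
+db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/dbname?parseTime=true&loc="+loc)
+if err != nil {
+	log.Fatal(err)
+}
+defer db.Close()
+```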
+
+##### `maxAllowedPacket`
+```
+Type: decimal number
+Default: 4194304
+```
+
+Max packet size allowed in bytes. The default value is 4 MiB and should be adjusted to match the server settings. `maxAllowedPacket=0` can be used to automatically fetch the `max_allowed_packet` variable from server *on every connection*.
+
+##### `multiStatements`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+Allow multiple statements in one query. While this allows batch queries, it also greatly increases the risk of SQL injections. Only the result of the first query is returned; all other results are silently discarded.
+
+When `multiStatements` is used, `?` parameters must only be used in the first statement.
+
+##### `parseTime`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+`parseTime=true` changes the output type of `DATE` and `DATETIME` values to `time.Time` instead of `[]byte` / `string`.
+A date or datetime like `0000-00-00 00:00:00` is converted into the zero value of `time.Time`.
+
+
+##### `readTimeout`
+
+```
+Type: duration
+Default: 0
+```
+
+I/O read timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
+##### `rejectReadOnly`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+
+`rejectReadOnly=true` causes the driver to reject read-only connections. This
+is for a possible race condition during an automatic failover, where the mysql
+client gets connected to a read-only replica after the failover.
+
+Note that this should be a fairly rare case, as an automatic failover normally
+happens when the primary is down, and the race condition shouldn't happen
+unless it comes back up online as soon as the failover is kicked off. On the
+other hand, when this happens, a MySQL application can get stuck on a
+read-only connection until restarted. It is however fairly easy to reproduce,
+for example, using a manual failover on AWS Aurora's MySQL-compatible cluster.
+
+If you are not relying on read-only transactions to reject writes that aren't
+supposed to happen, setting this on some MySQL providers (such as AWS Aurora)
+is safer for failovers.
+
+Note that ERROR 1290 can be returned for a `read-only` server and this option will
+cause a retry for that error. However the same error number is used for some
+other cases. You should ensure your application will never cause an ERROR 1290
+except for `read-only` mode when enabling this option.
+
+
+##### `serverPubKey`
+
+```
+Type: string
+Valid Values: <name>
+Default: none
+```
+
+Server public keys can be registered with [`mysql.RegisterServerPubKey`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterServerPubKey), which can then be used by the assigned name in the DSN.
+Public keys are used to transmit encrypted data, e.g. for authentication.
+If the server's public key is known, it should be set manually to avoid expensive and potentially insecure transmissions of the public key from the server to the client each time it is required.
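+
+A minimal sketch of registering a key and selecting it in the DSN. The PEM file path, the registry name `mykey`, and the DSN are placeholders, and the key is assumed to be an RSA public key in PKIX form:
+
+```go
+import (
+	"crypto/rsa"
+	"crypto/x509"
+	"database/sql"
+	"encoding/pem"
+	"io/ioutil"
+	"log"
+
+	"github.com/go-sql-driver/mysql"
+)
+
+// ...
+
+pemData, err := ioutil.ReadFile("server_pub_key.pem")
+if err != nil {
+	log.Fatal(err)
+}
+block, _ := pem.Decode(pemData)
+if block == nil {
+	log.Fatal("no PEM data found")
+}
+pub, err := x509.ParsePKIXPublicKey(block.Bytes)
+if err != nil {
+	log.Fatal(err)
+}
+mysql.RegisterServerPubKey("mykey", pub.(*rsa.PublicKey)) // assumes an RSA key
+
+db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/dbname?serverPubKey=mykey")
+if err != nil {
+	log.Fatal(err)
+}
+defer db.Close()
+```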
+
+
+##### `timeout`
+
+```
+Type: duration
+Default: OS default
+```
+
+Timeout for establishing connections, aka dial timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
+
+##### `tls`
+
+```
+Type: bool / string
+Valid Values: true, false, skip-verify, preferred, <name>
+Default: false
+```
+
+`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side) or use `preferred` to use TLS only when advertised by the server. This is similar to `skip-verify`, but additionally allows a fallback to a connection which is not encrypted. Neither `skip-verify` nor `preferred` add any reliable security. You can use a custom TLS config after registering it with [`mysql.RegisterTLSConfig`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig).
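+
+A minimal sketch of registering a custom TLS config and selecting it with `tls=custom`; the CA file path, the config name `custom`, and the DSN are placeholders:
+
+```go
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"database/sql"
+	"io/ioutil"
+	"log"
+
+	"github.com/go-sql-driver/mysql"
+)
+
+// ...
+
+rootCertPool := x509.NewCertPool()
+caPEM, err := ioutil.ReadFile("/path/to/ca-cert.pem")
+if err != nil {
+	log.Fatal(err)
+}
+if ok := rootCertPool.AppendCertsFromPEM(caPEM); !ok {
+	log.Fatal("failed to append CA certificate")
+}
+if err := mysql.RegisterTLSConfig("custom", &tls.Config{RootCAs: rootCertPool}); err != nil {
+	log.Fatal(err)
+}
+
+db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/dbname?tls=custom")
+if err != nil {
+	log.Fatal(err)
+}
+defer db.Close()
+```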
+
+
+##### `writeTimeout`
+
+```
+Type: duration
+Default: 0
+```
+
+I/O write timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
+
+##### System Variables
+
+Any other parameters are interpreted as system variables:
+ * `<boolean_var>=<value>`: `SET <boolean_var>=<value>`
+ * `<enum_var>=<value>`: `SET <enum_var>=<value>`
+ * `<string_var>=%27<value>%27`: `SET <string_var>='<value>'`
+
+Rules:
+* The values for string variables must be quoted with `'`.
+* The values must also be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!
+ (which implies values of string variables must be wrapped with `%27`).
+
+Examples:
+ * `autocommit=1`: `SET autocommit=1`
+ * [`time_zone=%27Europe%2FParis%27`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `SET time_zone='Europe/Paris'`
+ * [`transaction_isolation=%27REPEATABLE-READ%27`](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_transaction_isolation): `SET transaction_isolation='REPEATABLE-READ'`
+
+
+#### Examples
+```
+user@unix(/path/to/socket)/dbname
+```
+
+```
+root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local
+```
+
+```
+user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true
+```
+
+Treat warnings as errors by setting the system variable [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html):
+```
+user:password@/dbname?sql_mode=TRADITIONAL
+```
+
+TCP via IPv6:
+```
+user:password@tcp([de:ad:be:ef::ca:fe]:80)/dbname?timeout=90s&collation=utf8mb4_unicode_ci
+```
+
+TCP on a remote host, e.g. Amazon RDS:
+```
+id:password@tcp(your-amazonaws-uri.com:3306)/dbname
+```
+
+Google Cloud SQL on App Engine:
+```
+user:password@unix(/cloudsql/project-id:region-name:instance-name)/dbname
+```
+
+TCP using default port (3306) on localhost:
+```
+user:password@tcp/dbname?charset=utf8mb4,utf8&sys_var=esc%40ped
+```
+
+Use the default protocol (tcp) and host (localhost:3306):
+```
+user:password@/dbname
+```
+
+No Database preselected:
+```
+user:password@/
+```
+
+
+### Connection pool and timeouts
+The connection pool is managed by Go's database/sql package. For details on how to configure the size of the pool and how long connections stay in the pool see `*DB.SetMaxOpenConns`, `*DB.SetMaxIdleConns`, and `*DB.SetConnMaxLifetime` in the [database/sql documentation](https://golang.org/pkg/database/sql/). The read, write, and dial timeouts for each individual connection are configured with the DSN parameters [`readTimeout`](#readtimeout), [`writeTimeout`](#writetimeout), and [`timeout`](#timeout), respectively.
+
+### `ColumnType` Support
+This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported.
+
+### `context.Context` Support
+Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts.
+See [context support in the database/sql package](https://golang.org/doc/go1.8#database_sql) for more details.
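+
+A minimal sketch of a per-query timeout via a context, assuming a `db` opened as in the Usage example above (the query and table name are placeholders):
+
+```go
+import (
+	"context"
+	"log"
+	"time"
+)
+
+// ...
+
+ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+defer cancel()
+
+rows, err := db.QueryContext(ctx, "SELECT id FROM users")
+if err != nil {
+	log.Fatal(err)
+}
+defer rows.Close()
+```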
+
+
+### `LOAD DATA LOCAL INFILE` support
+For this feature you need direct access to the package. Therefore you must change the import path (no `_`):
+```go
+import "github.com/go-sql-driver/mysql"
+```
+
+Files must be explicitly allowed by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the allowlist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)).
+
+To use an `io.Reader`, a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)`, where the handler returns an `io.Reader` or `io.ReadCloser`. The Reader is then available via the filepath `Reader::<name>`. Choose different names for different handlers and call `DeregisterReaderHandler` when you don't need a handler anymore.
+
+See the [godoc of Go-MySQL-Driver](https://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details.
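+
+A minimal sketch of both variants, assuming a `db` opened as in the Usage example above. The file path, the handler name `data`, and the table name are placeholders, and the MySQL server must also permit `LOCAL` infile loading:
+
+```go
+import (
+	"io"
+	"log"
+	"strings"
+
+	"github.com/go-sql-driver/mysql"
+)
+
+// ...
+
+// Variant 1: allow one specific file on the client side.
+mysql.RegisterLocalFile("/tmp/data.csv")
+defer mysql.DeregisterLocalFile("/tmp/data.csv")
+if _, err := db.Exec("LOAD DATA LOCAL INFILE '/tmp/data.csv' INTO TABLE mytable"); err != nil {
+	log.Fatal(err)
+}
+
+// Variant 2: stream the data from an io.Reader registered under a handler name.
+mysql.RegisterReaderHandler("data", func() io.Reader {
+	return strings.NewReader("1,foo\n2,bar\n")
+})
+defer mysql.DeregisterReaderHandler("data")
+if _, err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE mytable"); err != nil {
+	log.Fatal(err)
+}
+```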
+
+
+### `time.Time` support
+The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your program.
+
+However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which are the logical equivalent in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](https://golang.org/pkg/time/#Location) with the `loc` DSN parameter.
+
+**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes).
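+
+A minimal sketch of scanning a `DATETIME` column into `time.Time`; the DSN, table, and column names are placeholders:
+
+```go
+import (
+	"database/sql"
+	"log"
+	"time"
+
+	_ "github.com/go-sql-driver/mysql"
+)
+
+// ...
+
+db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/dbname?parseTime=true&loc=Local")
+if err != nil {
+	log.Fatal(err)
+}
+defer db.Close()
+
+var createdAt time.Time
+err = db.QueryRow("SELECT created_at FROM events WHERE id = ?", 1).Scan(&createdAt)
+if err != nil {
+	log.Fatal(err)
+}
+```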
+
+
+### Unicode support
+Since version 1.5, Go-MySQL-Driver automatically uses the collation `utf8mb4_general_ci` by default.
+
+Other collations / charsets can be set using the [`collation`](#collation) DSN parameter.
+
+Version 1.0 of the driver recommended adding `&charset=utf8` (an alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is no longer necessary. The [`collation`](#collation) parameter should be preferred to set a collation / charset other than the default.
+
+See http://dev.mysql.com/doc/refman/8.0/en/charset-unicode.html for more details on MySQL's Unicode support.
+
+## Testing / Development
+To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details.
+
+Go-MySQL-Driver is not feature-complete yet. Your help is greatly appreciated.
+If you want to contribute, you can work on an [open issue](https://github.com/go-sql-driver/mysql/issues?state=open) or review a [pull request](https://github.com/go-sql-driver/mysql/pulls).
+
+See the [Contribution Guidelines](https://github.com/go-sql-driver/mysql/blob/master/.github/CONTRIBUTING.md) for details.
+
+---------------------------------------
+
+## License
+Go-MySQL-Driver is licensed under the [Mozilla Public License Version 2.0](https://raw.github.com/go-sql-driver/mysql/master/LICENSE)
+
+Mozilla summarizes the license scope as follows:
+> MPL: The copyleft applies to any files containing MPLed code.
+
+
+That means:
+ * You can **use** the **unchanged** source code both in private and commercially.
+ * When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0).
+ * You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**.
+
+Please read the [MPL 2.0 FAQ](https://www.mozilla.org/en-US/MPL/2.0/FAQ/) if you have further questions regarding the license.
+
+You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE).
+
+
diff --git a/vendor/github.com/go-sql-driver/mysql/auth.go b/vendor/github.com/go-sql-driver/mysql/auth.go
new file mode 100644
index 00000000..b2f19e8f
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/auth.go
@@ -0,0 +1,425 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/sha256"
+ "crypto/x509"
+ "encoding/pem"
+ "fmt"
+ "sync"
+)
+
+// server pub keys registry
+var (
+ serverPubKeyLock sync.RWMutex
+ serverPubKeyRegistry map[string]*rsa.PublicKey
+)
+
+// RegisterServerPubKey registers a server RSA public key which can be used to
+// send data in a secure manner to the server without receiving the public key
+// in a potentially insecure way from the server first.
+// Registered keys can afterwards be used by adding serverPubKey=<name> to the DSN.
+//
+// Note: The provided rsa.PublicKey instance is exclusively owned by the driver
+// after registering it and may not be modified.
+//
+// data, err := ioutil.ReadFile("mykey.pem")
+// if err != nil {
+// log.Fatal(err)
+// }
+//
+// block, _ := pem.Decode(data)
+// if block == nil || block.Type != "PUBLIC KEY" {
+// log.Fatal("failed to decode PEM block containing public key")
+// }
+//
+// pub, err := x509.ParsePKIXPublicKey(block.Bytes)
+// if err != nil {
+// log.Fatal(err)
+// }
+//
+// if rsaPubKey, ok := pub.(*rsa.PublicKey); ok {
+// mysql.RegisterServerPubKey("mykey", rsaPubKey)
+// } else {
+// log.Fatal("not a RSA public key")
+// }
+//
+func RegisterServerPubKey(name string, pubKey *rsa.PublicKey) {
+ serverPubKeyLock.Lock()
+ if serverPubKeyRegistry == nil {
+ serverPubKeyRegistry = make(map[string]*rsa.PublicKey)
+ }
+
+ serverPubKeyRegistry[name] = pubKey
+ serverPubKeyLock.Unlock()
+}
+
+// DeregisterServerPubKey removes the public key registered with the given name.
+func DeregisterServerPubKey(name string) {
+ serverPubKeyLock.Lock()
+ if serverPubKeyRegistry != nil {
+ delete(serverPubKeyRegistry, name)
+ }
+ serverPubKeyLock.Unlock()
+}
+
+func getServerPubKey(name string) (pubKey *rsa.PublicKey) {
+ serverPubKeyLock.RLock()
+ if v, ok := serverPubKeyRegistry[name]; ok {
+ pubKey = v
+ }
+ serverPubKeyLock.RUnlock()
+ return
+}
+
+// Hash password using pre 4.1 (old password) method
+// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c
+type myRnd struct {
+ seed1, seed2 uint32
+}
+
+const myRndMaxVal = 0x3FFFFFFF
+
+// Pseudo random number generator
+func newMyRnd(seed1, seed2 uint32) *myRnd {
+ return &myRnd{
+ seed1: seed1 % myRndMaxVal,
+ seed2: seed2 % myRndMaxVal,
+ }
+}
+
+// Tested to be equivalent to MariaDB's floating point variant
+// http://play.golang.org/p/QHvhd4qved
+// http://play.golang.org/p/RG0q4ElWDx
+func (r *myRnd) NextByte() byte {
+ r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal
+ r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal
+
+ return byte(uint64(r.seed1) * 31 / myRndMaxVal)
+}
+
+// Generate binary hash from byte string using insecure pre 4.1 method
+func pwHash(password []byte) (result [2]uint32) {
+ var add uint32 = 7
+ var tmp uint32
+
+ result[0] = 1345345333
+ result[1] = 0x12345671
+
+ for _, c := range password {
+ // skip spaces and tabs in password
+ if c == ' ' || c == '\t' {
+ continue
+ }
+
+ tmp = uint32(c)
+ result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8)
+ result[1] += (result[1] << 8) ^ result[0]
+ add += tmp
+ }
+
+ // Remove sign bit (1<<31)-1)
+ result[0] &= 0x7FFFFFFF
+ result[1] &= 0x7FFFFFFF
+
+ return
+}
+
+// Hash password using insecure pre 4.1 method
+func scrambleOldPassword(scramble []byte, password string) []byte {
+ scramble = scramble[:8]
+
+ hashPw := pwHash([]byte(password))
+ hashSc := pwHash(scramble)
+
+ r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1])
+
+ var out [8]byte
+ for i := range out {
+ out[i] = r.NextByte() + 64
+ }
+
+ mask := r.NextByte()
+ for i := range out {
+ out[i] ^= mask
+ }
+
+ return out[:]
+}
+
+// Hash password using 4.1+ method (SHA1)
+func scramblePassword(scramble []byte, password string) []byte {
+ if len(password) == 0 {
+ return nil
+ }
+
+ // stage1Hash = SHA1(password)
+ crypt := sha1.New()
+ crypt.Write([]byte(password))
+ stage1 := crypt.Sum(nil)
+
+ // scrambleHash = SHA1(scramble + SHA1(stage1Hash))
+ // inner Hash
+ crypt.Reset()
+ crypt.Write(stage1)
+ hash := crypt.Sum(nil)
+
+ // outer Hash
+ crypt.Reset()
+ crypt.Write(scramble)
+ crypt.Write(hash)
+ scramble = crypt.Sum(nil)
+
+ // token = scrambleHash XOR stage1Hash
+ for i := range scramble {
+ scramble[i] ^= stage1[i]
+ }
+ return scramble
+}
+
+// Hash password using MySQL 8+ method (SHA256)
+func scrambleSHA256Password(scramble []byte, password string) []byte {
+ if len(password) == 0 {
+ return nil
+ }
+
+ // XOR(SHA256(password), SHA256(SHA256(SHA256(password)), scramble))
+
+ crypt := sha256.New()
+ crypt.Write([]byte(password))
+ message1 := crypt.Sum(nil)
+
+ crypt.Reset()
+ crypt.Write(message1)
+ message1Hash := crypt.Sum(nil)
+
+ crypt.Reset()
+ crypt.Write(message1Hash)
+ crypt.Write(scramble)
+ message2 := crypt.Sum(nil)
+
+ for i := range message1 {
+ message1[i] ^= message2[i]
+ }
+
+ return message1
+}
+
+func encryptPassword(password string, seed []byte, pub *rsa.PublicKey) ([]byte, error) {
+ plain := make([]byte, len(password)+1)
+ copy(plain, password)
+ for i := range plain {
+ j := i % len(seed)
+ plain[i] ^= seed[j]
+ }
+ sha1 := sha1.New()
+ return rsa.EncryptOAEP(sha1, rand.Reader, pub, plain, nil)
+}
+
+func (mc *mysqlConn) sendEncryptedPassword(seed []byte, pub *rsa.PublicKey) error {
+ enc, err := encryptPassword(mc.cfg.Passwd, seed, pub)
+ if err != nil {
+ return err
+ }
+ return mc.writeAuthSwitchPacket(enc)
+}
+
+func (mc *mysqlConn) auth(authData []byte, plugin string) ([]byte, error) {
+ switch plugin {
+ case "caching_sha2_password":
+ authResp := scrambleSHA256Password(authData, mc.cfg.Passwd)
+ return authResp, nil
+
+ case "mysql_old_password":
+ if !mc.cfg.AllowOldPasswords {
+ return nil, ErrOldPassword
+ }
+ if len(mc.cfg.Passwd) == 0 {
+ return nil, nil
+ }
+ // Note: there are edge cases where this should work but doesn't;
+ // this is currently "wontfix":
+ // https://github.com/go-sql-driver/mysql/issues/184
+ authResp := append(scrambleOldPassword(authData[:8], mc.cfg.Passwd), 0)
+ return authResp, nil
+
+ case "mysql_clear_password":
+ if !mc.cfg.AllowCleartextPasswords {
+ return nil, ErrCleartextPassword
+ }
+ // http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html
+ // http://dev.mysql.com/doc/refman/5.7/en/pam-authentication-plugin.html
+ return append([]byte(mc.cfg.Passwd), 0), nil
+
+ case "mysql_native_password":
+ if !mc.cfg.AllowNativePasswords {
+ return nil, ErrNativePassword
+ }
+ // https://dev.mysql.com/doc/internals/en/secure-password-authentication.html
+ // Native password authentication only need and will need 20-byte challenge.
+ authResp := scramblePassword(authData[:20], mc.cfg.Passwd)
+ return authResp, nil
+
+ case "sha256_password":
+ if len(mc.cfg.Passwd) == 0 {
+ return []byte{0}, nil
+ }
+ if mc.cfg.tls != nil || mc.cfg.Net == "unix" {
+ // write cleartext auth packet
+ return append([]byte(mc.cfg.Passwd), 0), nil
+ }
+
+ pubKey := mc.cfg.pubKey
+ if pubKey == nil {
+ // request public key from server
+ return []byte{1}, nil
+ }
+
+ // encrypted password
+ enc, err := encryptPassword(mc.cfg.Passwd, authData, pubKey)
+ return enc, err
+
+ default:
+ errLog.Print("unknown auth plugin:", plugin)
+ return nil, ErrUnknownPlugin
+ }
+}
+
+func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
+ // Read Result Packet
+ authData, newPlugin, err := mc.readAuthResult()
+ if err != nil {
+ return err
+ }
+
+ // handle auth plugin switch, if requested
+ if newPlugin != "" {
+ // If CLIENT_PLUGIN_AUTH capability is not supported, no new cipher is
+ // sent and we have to keep using the cipher sent in the init packet.
+ if authData == nil {
+ authData = oldAuthData
+ } else {
+ // copy data from read buffer to owned slice
+ copy(oldAuthData, authData)
+ }
+
+ plugin = newPlugin
+
+ authResp, err := mc.auth(authData, plugin)
+ if err != nil {
+ return err
+ }
+ if err = mc.writeAuthSwitchPacket(authResp); err != nil {
+ return err
+ }
+
+ // Read Result Packet
+ authData, newPlugin, err = mc.readAuthResult()
+ if err != nil {
+ return err
+ }
+
+ // Do not allow to change the auth plugin more than once
+ if newPlugin != "" {
+ return ErrMalformPkt
+ }
+ }
+
+ switch plugin {
+
+ // https://insidemysql.com/preparing-your-community-connector-for-mysql-8-part-2-sha256/
+ case "caching_sha2_password":
+ switch len(authData) {
+ case 0:
+ return nil // auth successful
+ case 1:
+ switch authData[0] {
+ case cachingSha2PasswordFastAuthSuccess:
+ if err = mc.readResultOK(); err == nil {
+ return nil // auth successful
+ }
+
+ case cachingSha2PasswordPerformFullAuthentication:
+ if mc.cfg.tls != nil || mc.cfg.Net == "unix" {
+ // write cleartext auth packet
+ err = mc.writeAuthSwitchPacket(append([]byte(mc.cfg.Passwd), 0))
+ if err != nil {
+ return err
+ }
+ } else {
+ pubKey := mc.cfg.pubKey
+ if pubKey == nil {
+ // request public key from server
+ data, err := mc.buf.takeSmallBuffer(4 + 1)
+ if err != nil {
+ return err
+ }
+ data[4] = cachingSha2PasswordRequestPublicKey
+ mc.writePacket(data)
+
+ // parse public key
+ if data, err = mc.readPacket(); err != nil {
+ return err
+ }
+
+ block, rest := pem.Decode(data[1:])
+ if block == nil {
+ return fmt.Errorf("No Pem data found, data: %s", rest)
+ }
+ pkix, err := x509.ParsePKIXPublicKey(block.Bytes)
+ if err != nil {
+ return err
+ }
+ pubKey = pkix.(*rsa.PublicKey)
+ }
+
+ // send encrypted password
+ err = mc.sendEncryptedPassword(oldAuthData, pubKey)
+ if err != nil {
+ return err
+ }
+ }
+ return mc.readResultOK()
+
+ default:
+ return ErrMalformPkt
+ }
+ default:
+ return ErrMalformPkt
+ }
+
+ case "sha256_password":
+ switch len(authData) {
+ case 0:
+ return nil // auth successful
+ default:
+ block, _ := pem.Decode(authData)
+ pub, err := x509.ParsePKIXPublicKey(block.Bytes)
+ if err != nil {
+ return err
+ }
+
+ // send encrypted password
+ err = mc.sendEncryptedPassword(oldAuthData, pub.(*rsa.PublicKey))
+ if err != nil {
+ return err
+ }
+ return mc.readResultOK()
+ }
+
+ default:
+ return nil // auth successful
+ }
+
+ return err
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/buffer.go b/vendor/github.com/go-sql-driver/mysql/buffer.go
new file mode 100644
index 00000000..0774c5c8
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/buffer.go
@@ -0,0 +1,182 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "io"
+ "net"
+ "time"
+)
+
+const defaultBufSize = 4096
+const maxCachedBufSize = 256 * 1024
+
+// A buffer which is used for both reading and writing.
+// This is possible since communication on each connection is synchronous.
+// In other words, we can't write and read simultaneously on the same connection.
+// The buffer is similar to bufio.Reader / Writer but zero-copy-ish
+// Also highly optimized for this particular use case.
+// This buffer is backed by two byte slices in a double-buffering scheme
+type buffer struct {
+ buf []byte // buf is a byte buffer who's length and capacity are equal.
+ nc net.Conn
+ idx int
+ length int
+ timeout time.Duration
+ dbuf [2][]byte // dbuf is an array with the two byte slices that back this buffer
+ flipcnt uint // flipccnt is the current buffer counter for double-buffering
+}
+
+// newBuffer allocates and returns a new buffer.
+func newBuffer(nc net.Conn) buffer {
+ fg := make([]byte, defaultBufSize)
+ return buffer{
+ buf: fg,
+ nc: nc,
+ dbuf: [2][]byte{fg, nil},
+ }
+}
+
+// flip replaces the active buffer with the background buffer
+// this is a delayed flip that simply increases the buffer counter;
+// the actual flip will be performed the next time we call `buffer.fill`
+func (b *buffer) flip() {
+ b.flipcnt += 1
+}
+
+// fill reads into the buffer until at least _need_ bytes are in it
+func (b *buffer) fill(need int) error {
+ n := b.length
+ // fill data into its double-buffering target: if we've called
+ // flip on this buffer, we'll be copying to the background buffer,
+ // and then filling it with network data; otherwise we'll just move
+ // the contents of the current buffer to the front before filling it
+ dest := b.dbuf[b.flipcnt&1]
+
+ // grow buffer if necessary to fit the whole packet.
+ if need > len(dest) {
+ // Round up to the next multiple of the default size
+ dest = make([]byte, ((need/defaultBufSize)+1)*defaultBufSize)
+
+ // if the allocated buffer is not too large, move it to backing storage
+ // to prevent extra allocations on applications that perform large reads
+ if len(dest) <= maxCachedBufSize {
+ b.dbuf[b.flipcnt&1] = dest
+ }
+ }
+
+ // if we're filling the fg buffer, move the existing data to the start of it.
+ // if we're filling the bg buffer, copy over the data
+ if n > 0 {
+ copy(dest[:n], b.buf[b.idx:])
+ }
+
+ b.buf = dest
+ b.idx = 0
+
+ for {
+ if b.timeout > 0 {
+ if err := b.nc.SetReadDeadline(time.Now().Add(b.timeout)); err != nil {
+ return err
+ }
+ }
+
+ nn, err := b.nc.Read(b.buf[n:])
+ n += nn
+
+ switch err {
+ case nil:
+ if n < need {
+ continue
+ }
+ b.length = n
+ return nil
+
+ case io.EOF:
+ if n >= need {
+ b.length = n
+ return nil
+ }
+ return io.ErrUnexpectedEOF
+
+ default:
+ return err
+ }
+ }
+}
+
+// readNext returns the next N bytes from the buffer.
+// The returned slice is only guaranteed to be valid until the next read.
+func (b *buffer) readNext(need int) ([]byte, error) {
+ if b.length < need {
+ // refill
+ if err := b.fill(need); err != nil {
+ return nil, err
+ }
+ }
+
+ offset := b.idx
+ b.idx += need
+ b.length -= need
+ return b.buf[offset:b.idx], nil
+}
+
+// takeBuffer returns a buffer with the requested size.
+// If possible, a slice from the existing buffer is returned.
+// Otherwise a bigger buffer is made.
+// Only one buffer (total) can be used at a time.
+func (b *buffer) takeBuffer(length int) ([]byte, error) {
+ if b.length > 0 {
+ return nil, ErrBusyBuffer
+ }
+
+ // test (cheap) general case first
+ if length <= cap(b.buf) {
+ return b.buf[:length], nil
+ }
+
+ if length < maxPacketSize {
+ b.buf = make([]byte, length)
+ return b.buf, nil
+ }
+
+ // buffer is larger than we want to store.
+ return make([]byte, length), nil
+}
+
+// takeSmallBuffer is shortcut which can be used if length is
+// known to be smaller than defaultBufSize.
+// Only one buffer (total) can be used at a time.
+func (b *buffer) takeSmallBuffer(length int) ([]byte, error) {
+ if b.length > 0 {
+ return nil, ErrBusyBuffer
+ }
+ return b.buf[:length], nil
+}
+
+// takeCompleteBuffer returns the complete existing buffer.
+// This can be used if the necessary buffer size is unknown.
+// cap and len of the returned buffer will be equal.
+// Only one buffer (total) can be used at a time.
+func (b *buffer) takeCompleteBuffer() ([]byte, error) {
+ if b.length > 0 {
+ return nil, ErrBusyBuffer
+ }
+ return b.buf, nil
+}
+
+// store stores buf, an updated buffer, if it's suitable to do so.
+func (b *buffer) store(buf []byte) error {
+ if b.length > 0 {
+ return ErrBusyBuffer
+ } else if cap(buf) <= maxPacketSize && cap(buf) > cap(b.buf) {
+ b.buf = buf[:cap(buf)]
+ }
+ return nil
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/collations.go b/vendor/github.com/go-sql-driver/mysql/collations.go
new file mode 100644
index 00000000..326a9f7f
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/collations.go
@@ -0,0 +1,265 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2014 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+const defaultCollation = "utf8mb4_general_ci"
+const binaryCollation = "binary"
+
+// A list of available collations mapped to the internal ID.
+// To update this map use the following MySQL query:
+// SELECT COLLATION_NAME, ID FROM information_schema.COLLATIONS WHERE ID<256 ORDER BY ID
+//
+// The handshake packet has only 1 byte for collation_id, so we can't use collations with ID > 255.
+//
+// ucs2, utf16, and utf32 can't be used for connection charset.
+// https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html#charset-connection-impermissible-client-charset
+// They are commented out to reduce this map.
+var collations = map[string]byte{
+ "big5_chinese_ci": 1,
+ "latin2_czech_cs": 2,
+ "dec8_swedish_ci": 3,
+ "cp850_general_ci": 4,
+ "latin1_german1_ci": 5,
+ "hp8_english_ci": 6,
+ "koi8r_general_ci": 7,
+ "latin1_swedish_ci": 8,
+ "latin2_general_ci": 9,
+ "swe7_swedish_ci": 10,
+ "ascii_general_ci": 11,
+ "ujis_japanese_ci": 12,
+ "sjis_japanese_ci": 13,
+ "cp1251_bulgarian_ci": 14,
+ "latin1_danish_ci": 15,
+ "hebrew_general_ci": 16,
+ "tis620_thai_ci": 18,
+ "euckr_korean_ci": 19,
+ "latin7_estonian_cs": 20,
+ "latin2_hungarian_ci": 21,
+ "koi8u_general_ci": 22,
+ "cp1251_ukrainian_ci": 23,
+ "gb2312_chinese_ci": 24,
+ "greek_general_ci": 25,
+ "cp1250_general_ci": 26,
+ "latin2_croatian_ci": 27,
+ "gbk_chinese_ci": 28,
+ "cp1257_lithuanian_ci": 29,
+ "latin5_turkish_ci": 30,
+ "latin1_german2_ci": 31,
+ "armscii8_general_ci": 32,
+ "utf8_general_ci": 33,
+ "cp1250_czech_cs": 34,
+ //"ucs2_general_ci": 35,
+ "cp866_general_ci": 36,
+ "keybcs2_general_ci": 37,
+ "macce_general_ci": 38,
+ "macroman_general_ci": 39,
+ "cp852_general_ci": 40,
+ "latin7_general_ci": 41,
+ "latin7_general_cs": 42,
+ "macce_bin": 43,
+ "cp1250_croatian_ci": 44,
+ "utf8mb4_general_ci": 45,
+ "utf8mb4_bin": 46,
+ "latin1_bin": 47,
+ "latin1_general_ci": 48,
+ "latin1_general_cs": 49,
+ "cp1251_bin": 50,
+ "cp1251_general_ci": 51,
+ "cp1251_general_cs": 52,
+ "macroman_bin": 53,
+ //"utf16_general_ci": 54,
+ //"utf16_bin": 55,
+ //"utf16le_general_ci": 56,
+ "cp1256_general_ci": 57,
+ "cp1257_bin": 58,
+ "cp1257_general_ci": 59,
+ //"utf32_general_ci": 60,
+ //"utf32_bin": 61,
+ //"utf16le_bin": 62,
+ "binary": 63,
+ "armscii8_bin": 64,
+ "ascii_bin": 65,
+ "cp1250_bin": 66,
+ "cp1256_bin": 67,
+ "cp866_bin": 68,
+ "dec8_bin": 69,
+ "greek_bin": 70,
+ "hebrew_bin": 71,
+ "hp8_bin": 72,
+ "keybcs2_bin": 73,
+ "koi8r_bin": 74,
+ "koi8u_bin": 75,
+ "utf8_tolower_ci": 76,
+ "latin2_bin": 77,
+ "latin5_bin": 78,
+ "latin7_bin": 79,
+ "cp850_bin": 80,
+ "cp852_bin": 81,
+ "swe7_bin": 82,
+ "utf8_bin": 83,
+ "big5_bin": 84,
+ "euckr_bin": 85,
+ "gb2312_bin": 86,
+ "gbk_bin": 87,
+ "sjis_bin": 88,
+ "tis620_bin": 89,
+ //"ucs2_bin": 90,
+ "ujis_bin": 91,
+ "geostd8_general_ci": 92,
+ "geostd8_bin": 93,
+ "latin1_spanish_ci": 94,
+ "cp932_japanese_ci": 95,
+ "cp932_bin": 96,
+ "eucjpms_japanese_ci": 97,
+ "eucjpms_bin": 98,
+ "cp1250_polish_ci": 99,
+ //"utf16_unicode_ci": 101,
+ //"utf16_icelandic_ci": 102,
+ //"utf16_latvian_ci": 103,
+ //"utf16_romanian_ci": 104,
+ //"utf16_slovenian_ci": 105,
+ //"utf16_polish_ci": 106,
+ //"utf16_estonian_ci": 107,
+ //"utf16_spanish_ci": 108,
+ //"utf16_swedish_ci": 109,
+ //"utf16_turkish_ci": 110,
+ //"utf16_czech_ci": 111,
+ //"utf16_danish_ci": 112,
+ //"utf16_lithuanian_ci": 113,
+ //"utf16_slovak_ci": 114,
+ //"utf16_spanish2_ci": 115,
+ //"utf16_roman_ci": 116,
+ //"utf16_persian_ci": 117,
+ //"utf16_esperanto_ci": 118,
+ //"utf16_hungarian_ci": 119,
+ //"utf16_sinhala_ci": 120,
+ //"utf16_german2_ci": 121,
+ //"utf16_croatian_ci": 122,
+ //"utf16_unicode_520_ci": 123,
+ //"utf16_vietnamese_ci": 124,
+ //"ucs2_unicode_ci": 128,
+ //"ucs2_icelandic_ci": 129,
+ //"ucs2_latvian_ci": 130,
+ //"ucs2_romanian_ci": 131,
+ //"ucs2_slovenian_ci": 132,
+ //"ucs2_polish_ci": 133,
+ //"ucs2_estonian_ci": 134,
+ //"ucs2_spanish_ci": 135,
+ //"ucs2_swedish_ci": 136,
+ //"ucs2_turkish_ci": 137,
+ //"ucs2_czech_ci": 138,
+ //"ucs2_danish_ci": 139,
+ //"ucs2_lithuanian_ci": 140,
+ //"ucs2_slovak_ci": 141,
+ //"ucs2_spanish2_ci": 142,
+ //"ucs2_roman_ci": 143,
+ //"ucs2_persian_ci": 144,
+ //"ucs2_esperanto_ci": 145,
+ //"ucs2_hungarian_ci": 146,
+ //"ucs2_sinhala_ci": 147,
+ //"ucs2_german2_ci": 148,
+ //"ucs2_croatian_ci": 149,
+ //"ucs2_unicode_520_ci": 150,
+ //"ucs2_vietnamese_ci": 151,
+ //"ucs2_general_mysql500_ci": 159,
+ //"utf32_unicode_ci": 160,
+ //"utf32_icelandic_ci": 161,
+ //"utf32_latvian_ci": 162,
+ //"utf32_romanian_ci": 163,
+ //"utf32_slovenian_ci": 164,
+ //"utf32_polish_ci": 165,
+ //"utf32_estonian_ci": 166,
+ //"utf32_spanish_ci": 167,
+ //"utf32_swedish_ci": 168,
+ //"utf32_turkish_ci": 169,
+ //"utf32_czech_ci": 170,
+ //"utf32_danish_ci": 171,
+ //"utf32_lithuanian_ci": 172,
+ //"utf32_slovak_ci": 173,
+ //"utf32_spanish2_ci": 174,
+ //"utf32_roman_ci": 175,
+ //"utf32_persian_ci": 176,
+ //"utf32_esperanto_ci": 177,
+ //"utf32_hungarian_ci": 178,
+ //"utf32_sinhala_ci": 179,
+ //"utf32_german2_ci": 180,
+ //"utf32_croatian_ci": 181,
+ //"utf32_unicode_520_ci": 182,
+ //"utf32_vietnamese_ci": 183,
+ "utf8_unicode_ci": 192,
+ "utf8_icelandic_ci": 193,
+ "utf8_latvian_ci": 194,
+ "utf8_romanian_ci": 195,
+ "utf8_slovenian_ci": 196,
+ "utf8_polish_ci": 197,
+ "utf8_estonian_ci": 198,
+ "utf8_spanish_ci": 199,
+ "utf8_swedish_ci": 200,
+ "utf8_turkish_ci": 201,
+ "utf8_czech_ci": 202,
+ "utf8_danish_ci": 203,
+ "utf8_lithuanian_ci": 204,
+ "utf8_slovak_ci": 205,
+ "utf8_spanish2_ci": 206,
+ "utf8_roman_ci": 207,
+ "utf8_persian_ci": 208,
+ "utf8_esperanto_ci": 209,
+ "utf8_hungarian_ci": 210,
+ "utf8_sinhala_ci": 211,
+ "utf8_german2_ci": 212,
+ "utf8_croatian_ci": 213,
+ "utf8_unicode_520_ci": 214,
+ "utf8_vietnamese_ci": 215,
+ "utf8_general_mysql500_ci": 223,
+ "utf8mb4_unicode_ci": 224,
+ "utf8mb4_icelandic_ci": 225,
+ "utf8mb4_latvian_ci": 226,
+ "utf8mb4_romanian_ci": 227,
+ "utf8mb4_slovenian_ci": 228,
+ "utf8mb4_polish_ci": 229,
+ "utf8mb4_estonian_ci": 230,
+ "utf8mb4_spanish_ci": 231,
+ "utf8mb4_swedish_ci": 232,
+ "utf8mb4_turkish_ci": 233,
+ "utf8mb4_czech_ci": 234,
+ "utf8mb4_danish_ci": 235,
+ "utf8mb4_lithuanian_ci": 236,
+ "utf8mb4_slovak_ci": 237,
+ "utf8mb4_spanish2_ci": 238,
+ "utf8mb4_roman_ci": 239,
+ "utf8mb4_persian_ci": 240,
+ "utf8mb4_esperanto_ci": 241,
+ "utf8mb4_hungarian_ci": 242,
+ "utf8mb4_sinhala_ci": 243,
+ "utf8mb4_german2_ci": 244,
+ "utf8mb4_croatian_ci": 245,
+ "utf8mb4_unicode_520_ci": 246,
+ "utf8mb4_vietnamese_ci": 247,
+ "gb18030_chinese_ci": 248,
+ "gb18030_bin": 249,
+ "gb18030_unicode_520_ci": 250,
+ "utf8mb4_0900_ai_ci": 255,
+}
+
+// A denylist of collations which are unsafe for parameter interpolation.
+// These multibyte encodings may contain 0x5c (`\`) in their trailing bytes.
+var unsafeCollations = map[string]bool{
+ "big5_chinese_ci": true,
+ "sjis_japanese_ci": true,
+ "gbk_chinese_ci": true,
+ "big5_bin": true,
+ "gb2312_bin": true,
+ "gbk_bin": true,
+ "sjis_bin": true,
+ "cp932_japanese_ci": true,
+ "cp932_bin": true,
+ "gb18030_chinese_ci": true,
+ "gb18030_bin": true,
+ "gb18030_unicode_520_ci": true,
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/conncheck.go b/vendor/github.com/go-sql-driver/mysql/conncheck.go
new file mode 100644
index 00000000..024eb285
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/conncheck.go
@@ -0,0 +1,54 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2019 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build linux darwin dragonfly freebsd netbsd openbsd solaris illumos
+
+package mysql
+
+import (
+ "errors"
+ "io"
+ "net"
+ "syscall"
+)
+
+var errUnexpectedRead = errors.New("unexpected read from socket")
+
+func connCheck(conn net.Conn) error {
+ var sysErr error
+
+ sysConn, ok := conn.(syscall.Conn)
+ if !ok {
+ return nil
+ }
+ rawConn, err := sysConn.SyscallConn()
+ if err != nil {
+ return err
+ }
+
+ err = rawConn.Read(func(fd uintptr) bool {
+ var buf [1]byte
+ n, err := syscall.Read(int(fd), buf[:])
+ switch {
+ case n == 0 && err == nil:
+ sysErr = io.EOF
+ case n > 0:
+ sysErr = errUnexpectedRead
+ case err == syscall.EAGAIN || err == syscall.EWOULDBLOCK:
+ sysErr = nil
+ default:
+ sysErr = err
+ }
+ return true
+ })
+ if err != nil {
+ return err
+ }
+
+ return sysErr
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go b/vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go
new file mode 100644
index 00000000..ea7fb607
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go
@@ -0,0 +1,17 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2019 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build !linux,!darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!solaris,!illumos
+
+package mysql
+
+import "net"
+
+func connCheck(conn net.Conn) error {
+ return nil
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/go-sql-driver/mysql/connection.go
new file mode 100644
index 00000000..835f8972
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/connection.go
@@ -0,0 +1,650 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "context"
+ "database/sql"
+ "database/sql/driver"
+ "encoding/json"
+ "io"
+ "net"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type mysqlConn struct {
+ buf buffer
+ netConn net.Conn
+ rawConn net.Conn // underlying connection when netConn is TLS connection.
+ affectedRows uint64
+ insertId uint64
+ cfg *Config
+ maxAllowedPacket int
+ maxWriteSize int
+ writeTimeout time.Duration
+ flags clientFlag
+ status statusFlag
+ sequence uint8
+ parseTime bool
+ reset bool // set when the Go SQL package calls ResetSession
+
+ // for context support (Go 1.8+)
+ watching bool
+ watcher chan<- context.Context
+ closech chan struct{}
+ finished chan<- struct{}
+ canceled atomicError // set non-nil if conn is canceled
+ closed atomicBool // set when conn is closed, before closech is closed
+}
+
+// Handles parameters set in DSN after the connection is established
+func (mc *mysqlConn) handleParams() (err error) {
+ var cmdSet strings.Builder
+ for param, val := range mc.cfg.Params {
+ switch param {
+ // Charset: character_set_connection, character_set_client, character_set_results
+ case "charset":
+ charsets := strings.Split(val, ",")
+ for i := range charsets {
+ // ignore errors here - a charset may not exist
+ err = mc.exec("SET NAMES " + charsets[i])
+ if err == nil {
+ break
+ }
+ }
+ if err != nil {
+ return
+ }
+
+ // Other system vars accumulated in a single SET command
+ default:
+ if cmdSet.Len() == 0 {
+ // Heuristic: 29 chars for each other key=value to reduce reallocations
+ cmdSet.Grow(4 + len(param) + 1 + len(val) + 30*(len(mc.cfg.Params)-1))
+ cmdSet.WriteString("SET ")
+ } else {
+ cmdSet.WriteByte(',')
+ }
+ cmdSet.WriteString(param)
+ cmdSet.WriteByte('=')
+ cmdSet.WriteString(val)
+ }
+ }
+
+ if cmdSet.Len() > 0 {
+ err = mc.exec(cmdSet.String())
+ if err != nil {
+ return
+ }
+ }
+
+ return
+}
+
+func (mc *mysqlConn) markBadConn(err error) error {
+ if mc == nil {
+ return err
+ }
+ if err != errBadConnNoWrite {
+ return err
+ }
+ return driver.ErrBadConn
+}
+
+func (mc *mysqlConn) Begin() (driver.Tx, error) {
+ return mc.begin(false)
+}
+
+func (mc *mysqlConn) begin(readOnly bool) (driver.Tx, error) {
+ if mc.closed.IsSet() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ var q string
+ if readOnly {
+ q = "START TRANSACTION READ ONLY"
+ } else {
+ q = "START TRANSACTION"
+ }
+ err := mc.exec(q)
+ if err == nil {
+ return &mysqlTx{mc}, err
+ }
+ return nil, mc.markBadConn(err)
+}
+
+func (mc *mysqlConn) Close() (err error) {
+ // Makes Close idempotent
+ if !mc.closed.IsSet() {
+ err = mc.writeCommandPacket(comQuit)
+ }
+
+ mc.cleanup()
+
+ return
+}
+
+// Closes the network connection and unsets internal variables. Do not call this
+// function after successful authentication; call Close instead. This function
+// is called before auth or on auth failure because MySQL will have already
+// closed the network connection.
+func (mc *mysqlConn) cleanup() {
+ if !mc.closed.TrySet(true) {
+ return
+ }
+
+ // Makes cleanup idempotent
+ close(mc.closech)
+ if mc.netConn == nil {
+ return
+ }
+ if err := mc.netConn.Close(); err != nil {
+ errLog.Print(err)
+ }
+}
+
+func (mc *mysqlConn) error() error {
+ if mc.closed.IsSet() {
+ if err := mc.canceled.Value(); err != nil {
+ return err
+ }
+ return ErrInvalidConn
+ }
+ return nil
+}
+
+func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {
+ if mc.closed.IsSet() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ // Send command
+ err := mc.writeCommandPacketStr(comStmtPrepare, query)
+ if err != nil {
+ // STMT_PREPARE is safe to retry. So we can return ErrBadConn here.
+ errLog.Print(err)
+ return nil, driver.ErrBadConn
+ }
+
+ stmt := &mysqlStmt{
+ mc: mc,
+ }
+
+ // Read Result
+ columnCount, err := stmt.readPrepareResultPacket()
+ if err == nil {
+ if stmt.paramCount > 0 {
+ if err = mc.readUntilEOF(); err != nil {
+ return nil, err
+ }
+ }
+
+ if columnCount > 0 {
+ err = mc.readUntilEOF()
+ }
+ }
+
+ return stmt, err
+}
+
+func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (string, error) {
+ // The number of '?' placeholders must equal len(args)
+ if strings.Count(query, "?") != len(args) {
+ return "", driver.ErrSkip
+ }
+
+ buf, err := mc.buf.takeCompleteBuffer()
+ if err != nil {
+ // can not take the buffer. Something must be wrong with the connection
+ errLog.Print(err)
+ return "", ErrInvalidConn
+ }
+ buf = buf[:0]
+ argPos := 0
+
+ for i := 0; i < len(query); i++ {
+ q := strings.IndexByte(query[i:], '?')
+ if q == -1 {
+ buf = append(buf, query[i:]...)
+ break
+ }
+ buf = append(buf, query[i:i+q]...)
+ i += q
+
+ arg := args[argPos]
+ argPos++
+
+ if arg == nil {
+ buf = append(buf, "NULL"...)
+ continue
+ }
+
+ switch v := arg.(type) {
+ case int64:
+ buf = strconv.AppendInt(buf, v, 10)
+ case uint64:
+ // Handle uint64 explicitly because our custom ConvertValue emits unsigned values
+ buf = strconv.AppendUint(buf, v, 10)
+ case float64:
+ buf = strconv.AppendFloat(buf, v, 'g', -1, 64)
+ case bool:
+ if v {
+ buf = append(buf, '1')
+ } else {
+ buf = append(buf, '0')
+ }
+ case time.Time:
+ if v.IsZero() {
+ buf = append(buf, "'0000-00-00'"...)
+ } else {
+ buf = append(buf, '\'')
+ buf, err = appendDateTime(buf, v.In(mc.cfg.Loc))
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, '\'')
+ }
+ case json.RawMessage:
+ buf = append(buf, '\'')
+ if mc.status&statusNoBackslashEscapes == 0 {
+ buf = escapeBytesBackslash(buf, v)
+ } else {
+ buf = escapeBytesQuotes(buf, v)
+ }
+ buf = append(buf, '\'')
+ case []byte:
+ if v == nil {
+ buf = append(buf, "NULL"...)
+ } else {
+ buf = append(buf, "_binary'"...)
+ if mc.status&statusNoBackslashEscapes == 0 {
+ buf = escapeBytesBackslash(buf, v)
+ } else {
+ buf = escapeBytesQuotes(buf, v)
+ }
+ buf = append(buf, '\'')
+ }
+ case string:
+ buf = append(buf, '\'')
+ if mc.status&statusNoBackslashEscapes == 0 {
+ buf = escapeStringBackslash(buf, v)
+ } else {
+ buf = escapeStringQuotes(buf, v)
+ }
+ buf = append(buf, '\'')
+ default:
+ return "", driver.ErrSkip
+ }
+
+ if len(buf)+4 > mc.maxAllowedPacket {
+ return "", driver.ErrSkip
+ }
+ }
+ if argPos != len(args) {
+ return "", driver.ErrSkip
+ }
+ return string(buf), nil
+}
+
+func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) {
+ if mc.closed.IsSet() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ if len(args) != 0 {
+ if !mc.cfg.InterpolateParams {
+ return nil, driver.ErrSkip
+ }
+ // try to interpolate the parameters to save extra roundtrips for preparing and closing a statement
+ prepared, err := mc.interpolateParams(query, args)
+ if err != nil {
+ return nil, err
+ }
+ query = prepared
+ }
+ mc.affectedRows = 0
+ mc.insertId = 0
+
+ err := mc.exec(query)
+ if err == nil {
+ return &mysqlResult{
+ affectedRows: int64(mc.affectedRows),
+ insertId: int64(mc.insertId),
+ }, err
+ }
+ return nil, mc.markBadConn(err)
+}
+
+// Internal function to execute commands
+func (mc *mysqlConn) exec(query string) error {
+ // Send command
+ if err := mc.writeCommandPacketStr(comQuery, query); err != nil {
+ return mc.markBadConn(err)
+ }
+
+ // Read Result
+ resLen, err := mc.readResultSetHeaderPacket()
+ if err != nil {
+ return err
+ }
+
+ if resLen > 0 {
+ // columns
+ if err := mc.readUntilEOF(); err != nil {
+ return err
+ }
+
+ // rows
+ if err := mc.readUntilEOF(); err != nil {
+ return err
+ }
+ }
+
+ return mc.discardResults()
+}
+
+func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) {
+ return mc.query(query, args)
+}
+
+func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error) {
+ if mc.closed.IsSet() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ if len(args) != 0 {
+ if !mc.cfg.InterpolateParams {
+ return nil, driver.ErrSkip
+ }
+ // try client-side parameter interpolation to save the extra roundtrips of preparing and closing a statement
+ prepared, err := mc.interpolateParams(query, args)
+ if err != nil {
+ return nil, err
+ }
+ query = prepared
+ }
+ // Send command
+ err := mc.writeCommandPacketStr(comQuery, query)
+ if err == nil {
+ // Read Result
+ var resLen int
+ resLen, err = mc.readResultSetHeaderPacket()
+ if err == nil {
+ rows := new(textRows)
+ rows.mc = mc
+
+ if resLen == 0 {
+ rows.rs.done = true
+
+ switch err := rows.NextResultSet(); err {
+ case nil, io.EOF:
+ return rows, nil
+ default:
+ return nil, err
+ }
+ }
+
+ // Columns
+ rows.rs.columns, err = mc.readColumns(resLen)
+ return rows, err
+ }
+ }
+ return nil, mc.markBadConn(err)
+}
+
+// Gets the value of the given MySQL System Variable
+// The returned byte slice is only valid until the next read
+func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) {
+ // Send command
+ if err := mc.writeCommandPacketStr(comQuery, "SELECT @@"+name); err != nil {
+ return nil, err
+ }
+
+ // Read Result
+ resLen, err := mc.readResultSetHeaderPacket()
+ if err == nil {
+ rows := new(textRows)
+ rows.mc = mc
+ rows.rs.columns = []mysqlField{{fieldType: fieldTypeVarChar}}
+
+ if resLen > 0 {
+ // Columns
+ if err := mc.readUntilEOF(); err != nil {
+ return nil, err
+ }
+ }
+
+ dest := make([]driver.Value, resLen)
+ if err = rows.readRow(dest); err == nil {
+ return dest[0].([]byte), mc.readUntilEOF()
+ }
+ }
+ return nil, err
+}
+
+// cancel is called when the query has been canceled.
+func (mc *mysqlConn) cancel(err error) {
+ mc.canceled.Set(err)
+ mc.cleanup()
+}
+
+// finish is called when the query has succeeded.
+func (mc *mysqlConn) finish() {
+ if !mc.watching || mc.finished == nil {
+ return
+ }
+ select {
+ case mc.finished <- struct{}{}:
+ mc.watching = false
+ case <-mc.closech:
+ }
+}
+
+// Ping implements driver.Pinger interface
+func (mc *mysqlConn) Ping(ctx context.Context) (err error) {
+ if mc.closed.IsSet() {
+ errLog.Print(ErrInvalidConn)
+ return driver.ErrBadConn
+ }
+
+ if err = mc.watchCancel(ctx); err != nil {
+ return
+ }
+ defer mc.finish()
+
+ if err = mc.writeCommandPacket(comPing); err != nil {
+ return mc.markBadConn(err)
+ }
+
+ return mc.readResultOK()
+}
+
+// BeginTx implements driver.ConnBeginTx interface
+func (mc *mysqlConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
+ if mc.closed.IsSet() {
+ return nil, driver.ErrBadConn
+ }
+
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+ defer mc.finish()
+
+ if sql.IsolationLevel(opts.Isolation) != sql.LevelDefault {
+ level, err := mapIsolationLevel(opts.Isolation)
+ if err != nil {
+ return nil, err
+ }
+ err = mc.exec("SET TRANSACTION ISOLATION LEVEL " + level)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return mc.begin(opts.ReadOnly)
+}
+
+func (mc *mysqlConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+
+ rows, err := mc.query(query, dargs)
+ if err != nil {
+ mc.finish()
+ return nil, err
+ }
+ rows.finish = mc.finish
+ return rows, err
+}
+
+func (mc *mysqlConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+ defer mc.finish()
+
+ return mc.Exec(query, dargs)
+}
+
+func (mc *mysqlConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+
+ stmt, err := mc.Prepare(query)
+ mc.finish()
+ if err != nil {
+ return nil, err
+ }
+
+ select {
+ default:
+ case <-ctx.Done():
+ stmt.Close()
+ return nil, ctx.Err()
+ }
+ return stmt, nil
+}
+
+func (stmt *mysqlStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := stmt.mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+
+ rows, err := stmt.query(dargs)
+ if err != nil {
+ stmt.mc.finish()
+ return nil, err
+ }
+ rows.finish = stmt.mc.finish
+ return rows, err
+}
+
+func (stmt *mysqlStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := stmt.mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+ defer stmt.mc.finish()
+
+ return stmt.Exec(dargs)
+}
+
+func (mc *mysqlConn) watchCancel(ctx context.Context) error {
+ if mc.watching {
+ // We only reach here if the context was canceled while watching,
+ // so the connection is already invalid
+ mc.cleanup()
+ return nil
+ }
+ // When ctx is already cancelled, don't watch it.
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+ // When ctx is not cancellable, don't watch it.
+ if ctx.Done() == nil {
+ return nil
+ }
+ // When watcher is not alive, can't watch it.
+ if mc.watcher == nil {
+ return nil
+ }
+
+ mc.watching = true
+ mc.watcher <- ctx
+ return nil
+}
+
+func (mc *mysqlConn) startWatcher() {
+ watcher := make(chan context.Context, 1)
+ mc.watcher = watcher
+ finished := make(chan struct{})
+ mc.finished = finished
+ go func() {
+ for {
+ var ctx context.Context
+ select {
+ case ctx = <-watcher:
+ case <-mc.closech:
+ return
+ }
+
+ select {
+ case <-ctx.Done():
+ mc.cancel(ctx.Err())
+ case <-finished:
+ case <-mc.closech:
+ return
+ }
+ }
+ }()
+}
+
+func (mc *mysqlConn) CheckNamedValue(nv *driver.NamedValue) (err error) {
+ nv.Value, err = converter{}.ConvertValue(nv.Value)
+ return
+}
+
+// ResetSession implements driver.SessionResetter.
+// (From Go 1.10)
+func (mc *mysqlConn) ResetSession(ctx context.Context) error {
+ if mc.closed.IsSet() {
+ return driver.ErrBadConn
+ }
+ mc.reset = true
+ return nil
+}
+
+// IsValid implements driver.Validator interface
+// (From Go 1.15)
+func (mc *mysqlConn) IsValid() bool {
+ return !mc.closed.IsSet()
+}
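The connection type above wires context cancellation through startWatcher and watchCancel, so callers get cancellation through the standard database/sql context methods. A minimal usage sketch, assuming a reachable server; the DSN and query are placeholders:

package main

import (
	"context"
	"database/sql"
	"log"
	"time"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/dbname")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// QueryRowContext funnels into mysqlConn.QueryContext; if ctx expires,
	// the watcher goroutine cancels and tears down the connection.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	var now string
	if err := db.QueryRowContext(ctx, "SELECT NOW()").Scan(&now); err != nil {
		log.Fatal(err)
	}
	log.Println("server time:", now)
}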
diff --git a/vendor/github.com/go-sql-driver/mysql/connector.go b/vendor/github.com/go-sql-driver/mysql/connector.go
new file mode 100644
index 00000000..d567b4e4
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/connector.go
@@ -0,0 +1,146 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "context"
+ "database/sql/driver"
+ "net"
+)
+
+type connector struct {
+ cfg *Config // immutable private copy.
+}
+
+// Connect implements driver.Connector interface.
+// Connect returns a connection to the database.
+func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
+ var err error
+
+ // New mysqlConn
+ mc := &mysqlConn{
+ maxAllowedPacket: maxPacketSize,
+ maxWriteSize: maxPacketSize - 1,
+ closech: make(chan struct{}),
+ cfg: c.cfg,
+ }
+ mc.parseTime = mc.cfg.ParseTime
+
+ // Connect to Server
+ dialsLock.RLock()
+ dial, ok := dials[mc.cfg.Net]
+ dialsLock.RUnlock()
+ if ok {
+ dctx := ctx
+ if mc.cfg.Timeout > 0 {
+ var cancel context.CancelFunc
+ dctx, cancel = context.WithTimeout(ctx, c.cfg.Timeout)
+ defer cancel()
+ }
+ mc.netConn, err = dial(dctx, mc.cfg.Addr)
+ } else {
+ nd := net.Dialer{Timeout: mc.cfg.Timeout}
+ mc.netConn, err = nd.DialContext(ctx, mc.cfg.Net, mc.cfg.Addr)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ // Enable TCP Keepalives on TCP connections
+ if tc, ok := mc.netConn.(*net.TCPConn); ok {
+ if err := tc.SetKeepAlive(true); err != nil {
+ // Don't send COM_QUIT before handshake.
+ mc.netConn.Close()
+ mc.netConn = nil
+ return nil, err
+ }
+ }
+
+ // Call startWatcher for context support (From Go 1.8)
+ mc.startWatcher()
+ if err := mc.watchCancel(ctx); err != nil {
+ mc.cleanup()
+ return nil, err
+ }
+ defer mc.finish()
+
+ mc.buf = newBuffer(mc.netConn)
+
+ // Set I/O timeouts
+ mc.buf.timeout = mc.cfg.ReadTimeout
+ mc.writeTimeout = mc.cfg.WriteTimeout
+
+ // Reading Handshake Initialization Packet
+ authData, plugin, err := mc.readHandshakePacket()
+ if err != nil {
+ mc.cleanup()
+ return nil, err
+ }
+
+ if plugin == "" {
+ plugin = defaultAuthPlugin
+ }
+
+ // Send Client Authentication Packet
+ authResp, err := mc.auth(authData, plugin)
+ if err != nil {
+ // try the default auth plugin, if using the requested plugin failed
+ errLog.Print("could not use requested auth plugin '"+plugin+"': ", err.Error())
+ plugin = defaultAuthPlugin
+ authResp, err = mc.auth(authData, plugin)
+ if err != nil {
+ mc.cleanup()
+ return nil, err
+ }
+ }
+ if err = mc.writeHandshakeResponsePacket(authResp, plugin); err != nil {
+ mc.cleanup()
+ return nil, err
+ }
+
+ // Handle response to auth packet, switch methods if possible
+ if err = mc.handleAuthResult(authData, plugin); err != nil {
+ // Authentication failed and MySQL has already closed the connection
+ // (https://dev.mysql.com/doc/internals/en/authentication-fails.html).
+ // Do not send COM_QUIT, just cleanup and return the error.
+ mc.cleanup()
+ return nil, err
+ }
+
+ if mc.cfg.MaxAllowedPacket > 0 {
+ mc.maxAllowedPacket = mc.cfg.MaxAllowedPacket
+ } else {
+ // Get max allowed packet size
+ maxap, err := mc.getSystemVar("max_allowed_packet")
+ if err != nil {
+ mc.Close()
+ return nil, err
+ }
+ mc.maxAllowedPacket = stringToInt(maxap) - 1
+ }
+ if mc.maxAllowedPacket < maxPacketSize {
+ mc.maxWriteSize = mc.maxAllowedPacket
+ }
+
+ // Handle DSN Params
+ err = mc.handleParams()
+ if err != nil {
+ mc.Close()
+ return nil, err
+ }
+
+ return mc, nil
+}
+
+// Driver implements driver.Connector interface.
+// Driver returns &MySQLDriver{}.
+func (c *connector) Driver() driver.Driver {
+ return &MySQLDriver{}
+}
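connector.Connect is normally reached through sql.OpenDB rather than sql.Open, which avoids re-parsing a DSN for every new connection. A minimal sketch, assuming the NewConnector helper from driver.go below; credentials and address are placeholders:

package main

import (
	"database/sql"
	"log"

	"github.com/go-sql-driver/mysql"
)

func main() {
	cfg := mysql.NewConfig()
	cfg.User = "user"
	cfg.Passwd = "password"
	cfg.Net = "tcp"
	cfg.Addr = "127.0.0.1:3306"
	cfg.DBName = "dbname"

	// NewConnector normalizes cfg once; Connect is then called lazily
	// by the connection pool whenever a new connection is needed.
	connector, err := mysql.NewConnector(cfg)
	if err != nil {
		log.Fatal(err)
	}
	db := sql.OpenDB(connector)
	defer db.Close()

	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
}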
diff --git a/vendor/github.com/go-sql-driver/mysql/const.go b/vendor/github.com/go-sql-driver/mysql/const.go
new file mode 100644
index 00000000..b1e6b85e
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/const.go
@@ -0,0 +1,174 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+const (
+ defaultAuthPlugin = "mysql_native_password"
+ defaultMaxAllowedPacket = 4 << 20 // 4 MiB
+ minProtocolVersion = 10
+ maxPacketSize = 1<<24 - 1
+ timeFormat = "2006-01-02 15:04:05.999999"
+)
+
+// MySQL constants documentation:
+// http://dev.mysql.com/doc/internals/en/client-server-protocol.html
+
+const (
+ iOK byte = 0x00
+ iAuthMoreData byte = 0x01
+ iLocalInFile byte = 0xfb
+ iEOF byte = 0xfe
+ iERR byte = 0xff
+)
+
+// https://dev.mysql.com/doc/internals/en/capability-flags.html#packet-Protocol::CapabilityFlags
+type clientFlag uint32
+
+const (
+ clientLongPassword clientFlag = 1 << iota
+ clientFoundRows
+ clientLongFlag
+ clientConnectWithDB
+ clientNoSchema
+ clientCompress
+ clientODBC
+ clientLocalFiles
+ clientIgnoreSpace
+ clientProtocol41
+ clientInteractive
+ clientSSL
+ clientIgnoreSIGPIPE
+ clientTransactions
+ clientReserved
+ clientSecureConn
+ clientMultiStatements
+ clientMultiResults
+ clientPSMultiResults
+ clientPluginAuth
+ clientConnectAttrs
+ clientPluginAuthLenEncClientData
+ clientCanHandleExpiredPasswords
+ clientSessionTrack
+ clientDeprecateEOF
+)
+
+const (
+ comQuit byte = iota + 1
+ comInitDB
+ comQuery
+ comFieldList
+ comCreateDB
+ comDropDB
+ comRefresh
+ comShutdown
+ comStatistics
+ comProcessInfo
+ comConnect
+ comProcessKill
+ comDebug
+ comPing
+ comTime
+ comDelayedInsert
+ comChangeUser
+ comBinlogDump
+ comTableDump
+ comConnectOut
+ comRegisterSlave
+ comStmtPrepare
+ comStmtExecute
+ comStmtSendLongData
+ comStmtClose
+ comStmtReset
+ comSetOption
+ comStmtFetch
+)
+
+// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType
+type fieldType byte
+
+const (
+ fieldTypeDecimal fieldType = iota
+ fieldTypeTiny
+ fieldTypeShort
+ fieldTypeLong
+ fieldTypeFloat
+ fieldTypeDouble
+ fieldTypeNULL
+ fieldTypeTimestamp
+ fieldTypeLongLong
+ fieldTypeInt24
+ fieldTypeDate
+ fieldTypeTime
+ fieldTypeDateTime
+ fieldTypeYear
+ fieldTypeNewDate
+ fieldTypeVarChar
+ fieldTypeBit
+)
+const (
+ fieldTypeJSON fieldType = iota + 0xf5
+ fieldTypeNewDecimal
+ fieldTypeEnum
+ fieldTypeSet
+ fieldTypeTinyBLOB
+ fieldTypeMediumBLOB
+ fieldTypeLongBLOB
+ fieldTypeBLOB
+ fieldTypeVarString
+ fieldTypeString
+ fieldTypeGeometry
+)
+
+type fieldFlag uint16
+
+const (
+ flagNotNULL fieldFlag = 1 << iota
+ flagPriKey
+ flagUniqueKey
+ flagMultipleKey
+ flagBLOB
+ flagUnsigned
+ flagZeroFill
+ flagBinary
+ flagEnum
+ flagAutoIncrement
+ flagTimestamp
+ flagSet
+ flagUnknown1
+ flagUnknown2
+ flagUnknown3
+ flagUnknown4
+)
+
+// http://dev.mysql.com/doc/internals/en/status-flags.html
+type statusFlag uint16
+
+const (
+ statusInTrans statusFlag = 1 << iota
+ statusInAutocommit
+ statusReserved // Not in documentation
+ statusMoreResultsExists
+ statusNoGoodIndexUsed
+ statusNoIndexUsed
+ statusCursorExists
+ statusLastRowSent
+ statusDbDropped
+ statusNoBackslashEscapes
+ statusMetadataChanged
+ statusQueryWasSlow
+ statusPsOutParams
+ statusInTransReadonly
+ statusSessionStateChanged
+)
+
+const (
+ cachingSha2PasswordRequestPublicKey = 2
+ cachingSha2PasswordFastAuthSuccess = 3
+ cachingSha2PasswordPerformFullAuthentication = 4
+)
diff --git a/vendor/github.com/go-sql-driver/mysql/driver.go b/vendor/github.com/go-sql-driver/mysql/driver.go
new file mode 100644
index 00000000..c1bdf119
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/driver.go
@@ -0,0 +1,107 @@
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// Package mysql provides a MySQL driver for Go's database/sql package.
+//
+// The driver should be used via the database/sql package:
+//
+// import "database/sql"
+// import _ "github.com/go-sql-driver/mysql"
+//
+// db, err := sql.Open("mysql", "user:password@/dbname")
+//
+// See https://github.com/go-sql-driver/mysql#usage for details
+package mysql
+
+import (
+ "context"
+ "database/sql"
+ "database/sql/driver"
+ "net"
+ "sync"
+)
+
+// MySQLDriver is exported to make the driver directly accessible.
+// In general the driver is used via the database/sql package.
+type MySQLDriver struct{}
+
+// DialFunc is a function which can be used to establish the network connection.
+// Custom dial functions must be registered with RegisterDial
+//
+// Deprecated: users should register a DialContextFunc instead
+type DialFunc func(addr string) (net.Conn, error)
+
+// DialContextFunc is a function which can be used to establish the network connection.
+// Custom dial functions must be registered with RegisterDialContext
+type DialContextFunc func(ctx context.Context, addr string) (net.Conn, error)
+
+var (
+ dialsLock sync.RWMutex
+ dials map[string]DialContextFunc
+)
+
+// RegisterDialContext registers a custom dial function. It can then be used by the
+// network address mynet(addr), where mynet is the registered new network.
+// The current context for the connection and its address is passed to the dial function.
+func RegisterDialContext(net string, dial DialContextFunc) {
+ dialsLock.Lock()
+ defer dialsLock.Unlock()
+ if dials == nil {
+ dials = make(map[string]DialContextFunc)
+ }
+ dials[net] = dial
+}
+
+// RegisterDial registers a custom dial function. It can then be used by the
+// network address mynet(addr), where mynet is the registered new network.
+// addr is passed as a parameter to the dial function.
+//
+// Deprecated: users should call RegisterDialContext instead
+func RegisterDial(network string, dial DialFunc) {
+ RegisterDialContext(network, func(_ context.Context, addr string) (net.Conn, error) {
+ return dial(addr)
+ })
+}
+
+// Open opens a new connection.
+// See https://github.com/go-sql-driver/mysql#dsn-data-source-name for how
+// the DSN string is formatted
+func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
+ cfg, err := ParseDSN(dsn)
+ if err != nil {
+ return nil, err
+ }
+ c := &connector{
+ cfg: cfg,
+ }
+ return c.Connect(context.Background())
+}
+
+func init() {
+ sql.Register("mysql", &MySQLDriver{})
+}
+
+// NewConnector returns new driver.Connector.
+func NewConnector(cfg *Config) (driver.Connector, error) {
+ cfg = cfg.Clone()
+ // normalize the contents of cfg so calls to NewConnector have the same
+ // behavior as MySQLDriver.OpenConnector
+ if err := cfg.normalize(); err != nil {
+ return nil, err
+ }
+ return &connector{cfg: cfg}, nil
+}
+
+// OpenConnector implements driver.DriverContext.
+func (d MySQLDriver) OpenConnector(dsn string) (driver.Connector, error) {
+ cfg, err := ParseDSN(dsn)
+ if err != nil {
+ return nil, err
+ }
+ return &connector{
+ cfg: cfg,
+ }, nil
+}
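RegisterDialContext lets a program route connections for a custom network name through its own dialer (for example to tunnel through a proxy). A minimal sketch that wraps a plain TCP dialer under the hypothetical network name "mynet"; DSN values are placeholders:

package main

import (
	"context"
	"database/sql"
	"log"
	"net"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// Any DSN using the "mynet(...)" network now goes through this dialer.
	mysql.RegisterDialContext("mynet", func(ctx context.Context, addr string) (net.Conn, error) {
		var d net.Dialer
		return d.DialContext(ctx, "tcp", addr)
	})

	db, err := sql.Open("mysql", "user:password@mynet(127.0.0.1:3306)/dbname")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
}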
diff --git a/vendor/github.com/go-sql-driver/mysql/dsn.go b/vendor/github.com/go-sql-driver/mysql/dsn.go
new file mode 100644
index 00000000..93f3548c
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/dsn.go
@@ -0,0 +1,560 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "bytes"
+ "crypto/rsa"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "math/big"
+ "net"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var (
+ errInvalidDSNUnescaped = errors.New("invalid DSN: did you forget to escape a param value?")
+ errInvalidDSNAddr = errors.New("invalid DSN: network address not terminated (missing closing brace)")
+ errInvalidDSNNoSlash = errors.New("invalid DSN: missing the slash separating the database name")
+ errInvalidDSNUnsafeCollation = errors.New("invalid DSN: interpolateParams can not be used with unsafe collations")
+)
+
+// Config is a configuration parsed from a DSN string.
+// If a new Config is created instead of being parsed from a DSN string,
+// the NewConfig function should be used, which sets default values.
+type Config struct {
+ User string // Username
+ Passwd string // Password (requires User)
+ Net string // Network type
+ Addr string // Network address (requires Net)
+ DBName string // Database name
+ Params map[string]string // Connection parameters
+ Collation string // Connection collation
+ Loc *time.Location // Location for time.Time values
+ MaxAllowedPacket int // Max packet size allowed
+ ServerPubKey string // Server public key name
+ pubKey *rsa.PublicKey // Server public key
+ TLSConfig string // TLS configuration name
+ tls *tls.Config // TLS configuration
+ Timeout time.Duration // Dial timeout
+ ReadTimeout time.Duration // I/O read timeout
+ WriteTimeout time.Duration // I/O write timeout
+
+ AllowAllFiles bool // Allow all files to be used with LOAD DATA LOCAL INFILE
+ AllowCleartextPasswords bool // Allows the cleartext client side plugin
+ AllowNativePasswords bool // Allows the native password authentication method
+ AllowOldPasswords bool // Allows the old insecure password method
+ CheckConnLiveness bool // Check connections for liveness before using them
+ ClientFoundRows bool // Return number of matching rows instead of rows changed
+ ColumnsWithAlias bool // Prepend table alias to column names
+ InterpolateParams bool // Interpolate placeholders into query string
+ MultiStatements bool // Allow multiple statements in one query
+ ParseTime bool // Parse time values to time.Time
+ RejectReadOnly bool // Reject read-only connections
+}
+
+// NewConfig creates a new Config and sets default values.
+func NewConfig() *Config {
+ return &Config{
+ Collation: defaultCollation,
+ Loc: time.UTC,
+ MaxAllowedPacket: defaultMaxAllowedPacket,
+ AllowNativePasswords: true,
+ CheckConnLiveness: true,
+ }
+}
+
+func (cfg *Config) Clone() *Config {
+ cp := *cfg
+ if cp.tls != nil {
+ cp.tls = cfg.tls.Clone()
+ }
+ if len(cp.Params) > 0 {
+ cp.Params = make(map[string]string, len(cfg.Params))
+ for k, v := range cfg.Params {
+ cp.Params[k] = v
+ }
+ }
+ if cfg.pubKey != nil {
+ cp.pubKey = &rsa.PublicKey{
+ N: new(big.Int).Set(cfg.pubKey.N),
+ E: cfg.pubKey.E,
+ }
+ }
+ return &cp
+}
+
+func (cfg *Config) normalize() error {
+ if cfg.InterpolateParams && unsafeCollations[cfg.Collation] {
+ return errInvalidDSNUnsafeCollation
+ }
+
+ // Set default network if empty
+ if cfg.Net == "" {
+ cfg.Net = "tcp"
+ }
+
+ // Set default address if empty
+ if cfg.Addr == "" {
+ switch cfg.Net {
+ case "tcp":
+ cfg.Addr = "127.0.0.1:3306"
+ case "unix":
+ cfg.Addr = "/tmp/mysql.sock"
+ default:
+ return errors.New("default addr for network '" + cfg.Net + "' unknown")
+ }
+ } else if cfg.Net == "tcp" {
+ cfg.Addr = ensureHavePort(cfg.Addr)
+ }
+
+ switch cfg.TLSConfig {
+ case "false", "":
+ // don't set anything
+ case "true":
+ cfg.tls = &tls.Config{}
+ case "skip-verify", "preferred":
+ cfg.tls = &tls.Config{InsecureSkipVerify: true}
+ default:
+ cfg.tls = getTLSConfigClone(cfg.TLSConfig)
+ if cfg.tls == nil {
+ return errors.New("invalid value / unknown config name: " + cfg.TLSConfig)
+ }
+ }
+
+ if cfg.tls != nil && cfg.tls.ServerName == "" && !cfg.tls.InsecureSkipVerify {
+ host, _, err := net.SplitHostPort(cfg.Addr)
+ if err == nil {
+ cfg.tls.ServerName = host
+ }
+ }
+
+ if cfg.ServerPubKey != "" {
+ cfg.pubKey = getServerPubKey(cfg.ServerPubKey)
+ if cfg.pubKey == nil {
+ return errors.New("invalid value / unknown server pub key name: " + cfg.ServerPubKey)
+ }
+ }
+
+ return nil
+}
+
+func writeDSNParam(buf *bytes.Buffer, hasParam *bool, name, value string) {
+ buf.Grow(1 + len(name) + 1 + len(value))
+ if !*hasParam {
+ *hasParam = true
+ buf.WriteByte('?')
+ } else {
+ buf.WriteByte('&')
+ }
+ buf.WriteString(name)
+ buf.WriteByte('=')
+ buf.WriteString(value)
+}
+
+// FormatDSN formats the given Config into a DSN string which can be passed to
+// the driver.
+func (cfg *Config) FormatDSN() string {
+ var buf bytes.Buffer
+
+ // [username[:password]@]
+ if len(cfg.User) > 0 {
+ buf.WriteString(cfg.User)
+ if len(cfg.Passwd) > 0 {
+ buf.WriteByte(':')
+ buf.WriteString(cfg.Passwd)
+ }
+ buf.WriteByte('@')
+ }
+
+ // [protocol[(address)]]
+ if len(cfg.Net) > 0 {
+ buf.WriteString(cfg.Net)
+ if len(cfg.Addr) > 0 {
+ buf.WriteByte('(')
+ buf.WriteString(cfg.Addr)
+ buf.WriteByte(')')
+ }
+ }
+
+ // /dbname
+ buf.WriteByte('/')
+ buf.WriteString(cfg.DBName)
+
+ // [?param1=value1&...¶mN=valueN]
+ hasParam := false
+
+ if cfg.AllowAllFiles {
+ hasParam = true
+ buf.WriteString("?allowAllFiles=true")
+ }
+
+ if cfg.AllowCleartextPasswords {
+ writeDSNParam(&buf, &hasParam, "allowCleartextPasswords", "true")
+ }
+
+ if !cfg.AllowNativePasswords {
+ writeDSNParam(&buf, &hasParam, "allowNativePasswords", "false")
+ }
+
+ if cfg.AllowOldPasswords {
+ writeDSNParam(&buf, &hasParam, "allowOldPasswords", "true")
+ }
+
+ if !cfg.CheckConnLiveness {
+ writeDSNParam(&buf, &hasParam, "checkConnLiveness", "false")
+ }
+
+ if cfg.ClientFoundRows {
+ writeDSNParam(&buf, &hasParam, "clientFoundRows", "true")
+ }
+
+ if col := cfg.Collation; col != defaultCollation && len(col) > 0 {
+ writeDSNParam(&buf, &hasParam, "collation", col)
+ }
+
+ if cfg.ColumnsWithAlias {
+ writeDSNParam(&buf, &hasParam, "columnsWithAlias", "true")
+ }
+
+ if cfg.InterpolateParams {
+ writeDSNParam(&buf, &hasParam, "interpolateParams", "true")
+ }
+
+ if cfg.Loc != time.UTC && cfg.Loc != nil {
+ writeDSNParam(&buf, &hasParam, "loc", url.QueryEscape(cfg.Loc.String()))
+ }
+
+ if cfg.MultiStatements {
+ writeDSNParam(&buf, &hasParam, "multiStatements", "true")
+ }
+
+ if cfg.ParseTime {
+ writeDSNParam(&buf, &hasParam, "parseTime", "true")
+ }
+
+ if cfg.ReadTimeout > 0 {
+ writeDSNParam(&buf, &hasParam, "readTimeout", cfg.ReadTimeout.String())
+ }
+
+ if cfg.RejectReadOnly {
+ writeDSNParam(&buf, &hasParam, "rejectReadOnly", "true")
+ }
+
+ if len(cfg.ServerPubKey) > 0 {
+ writeDSNParam(&buf, &hasParam, "serverPubKey", url.QueryEscape(cfg.ServerPubKey))
+ }
+
+ if cfg.Timeout > 0 {
+ writeDSNParam(&buf, &hasParam, "timeout", cfg.Timeout.String())
+ }
+
+ if len(cfg.TLSConfig) > 0 {
+ writeDSNParam(&buf, &hasParam, "tls", url.QueryEscape(cfg.TLSConfig))
+ }
+
+ if cfg.WriteTimeout > 0 {
+ writeDSNParam(&buf, &hasParam, "writeTimeout", cfg.WriteTimeout.String())
+ }
+
+ if cfg.MaxAllowedPacket != defaultMaxAllowedPacket {
+ writeDSNParam(&buf, &hasParam, "maxAllowedPacket", strconv.Itoa(cfg.MaxAllowedPacket))
+ }
+
+ // other params
+ if cfg.Params != nil {
+ var params []string
+ for param := range cfg.Params {
+ params = append(params, param)
+ }
+ sort.Strings(params)
+ for _, param := range params {
+ writeDSNParam(&buf, &hasParam, param, url.QueryEscape(cfg.Params[param]))
+ }
+ }
+
+ return buf.String()
+}
+
+// ParseDSN parses the DSN string to a Config
+func ParseDSN(dsn string) (cfg *Config, err error) {
+ // New config with some default values
+ cfg = NewConfig()
+
+ // [user[:password]@][net[(addr)]]/dbname[?param1=value1¶mN=valueN]
+ // Find the last '/' (since the password or the net addr might contain a '/')
+ foundSlash := false
+ for i := len(dsn) - 1; i >= 0; i-- {
+ if dsn[i] == '/' {
+ foundSlash = true
+ var j, k int
+
+ // left part is empty if i <= 0
+ if i > 0 {
+ // [username[:password]@][protocol[(address)]]
+ // Find the last '@' in dsn[:i]
+ for j = i; j >= 0; j-- {
+ if dsn[j] == '@' {
+ // username[:password]
+ // Find the first ':' in dsn[:j]
+ for k = 0; k < j; k++ {
+ if dsn[k] == ':' {
+ cfg.Passwd = dsn[k+1 : j]
+ break
+ }
+ }
+ cfg.User = dsn[:k]
+
+ break
+ }
+ }
+
+ // [protocol[(address)]]
+ // Find the first '(' in dsn[j+1:i]
+ for k = j + 1; k < i; k++ {
+ if dsn[k] == '(' {
+ // dsn[i-1] must be == ')' if an address is specified
+ if dsn[i-1] != ')' {
+ if strings.ContainsRune(dsn[k+1:i], ')') {
+ return nil, errInvalidDSNUnescaped
+ }
+ return nil, errInvalidDSNAddr
+ }
+ cfg.Addr = dsn[k+1 : i-1]
+ break
+ }
+ }
+ cfg.Net = dsn[j+1 : k]
+ }
+
+ // dbname[?param1=value1&...¶mN=valueN]
+ // Find the first '?' in dsn[i+1:]
+ for j = i + 1; j < len(dsn); j++ {
+ if dsn[j] == '?' {
+ if err = parseDSNParams(cfg, dsn[j+1:]); err != nil {
+ return
+ }
+ break
+ }
+ }
+ cfg.DBName = dsn[i+1 : j]
+
+ break
+ }
+ }
+
+ if !foundSlash && len(dsn) > 0 {
+ return nil, errInvalidDSNNoSlash
+ }
+
+ if err = cfg.normalize(); err != nil {
+ return nil, err
+ }
+ return
+}
+
+// parseDSNParams parses the DSN "query string"
+// Values must be url.QueryEscape'ed
+func parseDSNParams(cfg *Config, params string) (err error) {
+ for _, v := range strings.Split(params, "&") {
+ param := strings.SplitN(v, "=", 2)
+ if len(param) != 2 {
+ continue
+ }
+
+ // cfg params
+ switch value := param[1]; param[0] {
+ // Disable INFILE allowlist / enable all files
+ case "allowAllFiles":
+ var isBool bool
+ cfg.AllowAllFiles, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Use cleartext authentication mode (MySQL 5.5.10+)
+ case "allowCleartextPasswords":
+ var isBool bool
+ cfg.AllowCleartextPasswords, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Use native password authentication
+ case "allowNativePasswords":
+ var isBool bool
+ cfg.AllowNativePasswords, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Use old authentication mode (pre MySQL 4.1)
+ case "allowOldPasswords":
+ var isBool bool
+ cfg.AllowOldPasswords, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Check connections for Liveness before using them
+ case "checkConnLiveness":
+ var isBool bool
+ cfg.CheckConnLiveness, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Switch "rowsAffected" mode
+ case "clientFoundRows":
+ var isBool bool
+ cfg.ClientFoundRows, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Collation
+ case "collation":
+ cfg.Collation = value
+
+ case "columnsWithAlias":
+ var isBool bool
+ cfg.ColumnsWithAlias, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Compression
+ case "compress":
+ return errors.New("compression not implemented yet")
+
+ // Enable client side placeholder substitution
+ case "interpolateParams":
+ var isBool bool
+ cfg.InterpolateParams, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Time Location
+ case "loc":
+ if value, err = url.QueryUnescape(value); err != nil {
+ return
+ }
+ cfg.Loc, err = time.LoadLocation(value)
+ if err != nil {
+ return
+ }
+
+ // multiple statements in one query
+ case "multiStatements":
+ var isBool bool
+ cfg.MultiStatements, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // time.Time parsing
+ case "parseTime":
+ var isBool bool
+ cfg.ParseTime, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // I/O read Timeout
+ case "readTimeout":
+ cfg.ReadTimeout, err = time.ParseDuration(value)
+ if err != nil {
+ return
+ }
+
+ // Reject read-only connections
+ case "rejectReadOnly":
+ var isBool bool
+ cfg.RejectReadOnly, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Server public key
+ case "serverPubKey":
+ name, err := url.QueryUnescape(value)
+ if err != nil {
+ return fmt.Errorf("invalid value for server pub key name: %v", err)
+ }
+ cfg.ServerPubKey = name
+
+ // Strict mode
+ case "strict":
+ panic("strict mode has been removed. See https://github.com/go-sql-driver/mysql/wiki/strict-mode")
+
+ // Dial Timeout
+ case "timeout":
+ cfg.Timeout, err = time.ParseDuration(value)
+ if err != nil {
+ return
+ }
+
+ // TLS-Encryption
+ case "tls":
+ boolValue, isBool := readBool(value)
+ if isBool {
+ if boolValue {
+ cfg.TLSConfig = "true"
+ } else {
+ cfg.TLSConfig = "false"
+ }
+ } else if vl := strings.ToLower(value); vl == "skip-verify" || vl == "preferred" {
+ cfg.TLSConfig = vl
+ } else {
+ name, err := url.QueryUnescape(value)
+ if err != nil {
+ return fmt.Errorf("invalid value for TLS config name: %v", err)
+ }
+ cfg.TLSConfig = name
+ }
+
+ // I/O write Timeout
+ case "writeTimeout":
+ cfg.WriteTimeout, err = time.ParseDuration(value)
+ if err != nil {
+ return
+ }
+ case "maxAllowedPacket":
+ cfg.MaxAllowedPacket, err = strconv.Atoi(value)
+ if err != nil {
+ return
+ }
+ default:
+ // lazy init
+ if cfg.Params == nil {
+ cfg.Params = make(map[string]string)
+ }
+
+ if cfg.Params[param[0]], err = url.QueryUnescape(value); err != nil {
+ return
+ }
+ }
+ }
+
+ return
+}
+
+func ensureHavePort(addr string) string {
+ if _, _, err := net.SplitHostPort(addr); err != nil {
+ return net.JoinHostPort(addr, "3306")
+ }
+ return addr
+}
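Config, FormatDSN and ParseDSN are designed to round-trip, so a program can build its DSN from typed fields instead of concatenating strings. A minimal sketch; all values are placeholders:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/go-sql-driver/mysql"
)

func main() {
	cfg := mysql.NewConfig() // library defaults: UTC loc, liveness checks, native passwords
	cfg.User = "user"
	cfg.Passwd = "password"
	cfg.Net = "tcp"
	cfg.Addr = "127.0.0.1" // ensureHavePort adds :3306 when the DSN is parsed/normalized
	cfg.DBName = "dbname"
	cfg.ParseTime = true
	cfg.ReadTimeout = 30 * time.Second

	dsn := cfg.FormatDSN()
	fmt.Println(dsn) // user:password@tcp(127.0.0.1)/dbname?parseTime=true&readTimeout=30s

	// ParseDSN returns an equivalent, normalized Config.
	back, err := mysql.ParseDSN(dsn)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(back.Addr, back.ParseTime) // 127.0.0.1:3306 true
}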
diff --git a/vendor/github.com/go-sql-driver/mysql/errors.go b/vendor/github.com/go-sql-driver/mysql/errors.go
new file mode 100644
index 00000000..760782ff
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/errors.go
@@ -0,0 +1,65 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "errors"
+ "fmt"
+ "log"
+ "os"
+)
+
+// Various errors the driver might return. Can change between driver versions.
+var (
+ ErrInvalidConn = errors.New("invalid connection")
+ ErrMalformPkt = errors.New("malformed packet")
+ ErrNoTLS = errors.New("TLS requested but server does not support TLS")
+ ErrCleartextPassword = errors.New("this user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN")
+ ErrNativePassword = errors.New("this user requires mysql native password authentication.")
+ ErrOldPassword = errors.New("this user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords")
+ ErrUnknownPlugin = errors.New("this authentication plugin is not supported")
+ ErrOldProtocol = errors.New("MySQL server does not support required protocol 41+")
+ ErrPktSync = errors.New("commands out of sync. You can't run this command now")
+ ErrPktSyncMul = errors.New("commands out of sync. Did you run multiple statements at once?")
+ ErrPktTooLarge = errors.New("packet for query is too large. Try adjusting the 'max_allowed_packet' variable on the server")
+ ErrBusyBuffer = errors.New("busy buffer")
+
+ // errBadConnNoWrite is used for connection errors where nothing was sent to the database yet.
+ // If this happens first in a function starting a database interaction, it should be replaced by driver.ErrBadConn
+ // to trigger a resend.
+ // See https://github.com/go-sql-driver/mysql/pull/302
+ errBadConnNoWrite = errors.New("bad connection")
+)
+
+var errLog = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile))
+
+// Logger is used to log critical error messages.
+type Logger interface {
+ Print(v ...interface{})
+}
+
+// SetLogger is used to set the logger for critical errors.
+// The default logger writes to os.Stderr.
+func SetLogger(logger Logger) error {
+ if logger == nil {
+ return errors.New("logger is nil")
+ }
+ errLog = logger
+ return nil
+}
+
+// MySQLError is an error type which represents a single MySQL error
+type MySQLError struct {
+ Number uint16
+ Message string
+}
+
+func (me *MySQLError) Error() string {
+ return fmt.Sprintf("Error %d: %s", me.Number, me.Message)
+}
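Server-side errors surface as *MySQLError, which callers can unwrap to branch on the MySQL error number (1062, duplicate key, is used below purely as an illustration). A minimal sketch, assuming db is an open *sql.DB and table t exists:

package example

import (
	"database/sql"
	"errors"
	"log"

	"github.com/go-sql-driver/mysql"
)

func insertOnce(db *sql.DB) {
	_, err := db.Exec("INSERT INTO t (id) VALUES (1)")
	if err == nil {
		return
	}
	var myErr *mysql.MySQLError
	if errors.As(err, &myErr) && myErr.Number == 1062 {
		// Server-reported error: inspect the MySQL error number and message.
		log.Println("duplicate key:", myErr.Message)
		return
	}
	log.Println("query failed:", err)
}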
diff --git a/vendor/github.com/go-sql-driver/mysql/fields.go b/vendor/github.com/go-sql-driver/mysql/fields.go
new file mode 100644
index 00000000..ed6c7a37
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/fields.go
@@ -0,0 +1,194 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "database/sql"
+ "reflect"
+)
+
+func (mf *mysqlField) typeDatabaseName() string {
+ switch mf.fieldType {
+ case fieldTypeBit:
+ return "BIT"
+ case fieldTypeBLOB:
+ if mf.charSet != collations[binaryCollation] {
+ return "TEXT"
+ }
+ return "BLOB"
+ case fieldTypeDate:
+ return "DATE"
+ case fieldTypeDateTime:
+ return "DATETIME"
+ case fieldTypeDecimal:
+ return "DECIMAL"
+ case fieldTypeDouble:
+ return "DOUBLE"
+ case fieldTypeEnum:
+ return "ENUM"
+ case fieldTypeFloat:
+ return "FLOAT"
+ case fieldTypeGeometry:
+ return "GEOMETRY"
+ case fieldTypeInt24:
+ return "MEDIUMINT"
+ case fieldTypeJSON:
+ return "JSON"
+ case fieldTypeLong:
+ return "INT"
+ case fieldTypeLongBLOB:
+ if mf.charSet != collations[binaryCollation] {
+ return "LONGTEXT"
+ }
+ return "LONGBLOB"
+ case fieldTypeLongLong:
+ return "BIGINT"
+ case fieldTypeMediumBLOB:
+ if mf.charSet != collations[binaryCollation] {
+ return "MEDIUMTEXT"
+ }
+ return "MEDIUMBLOB"
+ case fieldTypeNewDate:
+ return "DATE"
+ case fieldTypeNewDecimal:
+ return "DECIMAL"
+ case fieldTypeNULL:
+ return "NULL"
+ case fieldTypeSet:
+ return "SET"
+ case fieldTypeShort:
+ return "SMALLINT"
+ case fieldTypeString:
+ if mf.charSet == collations[binaryCollation] {
+ return "BINARY"
+ }
+ return "CHAR"
+ case fieldTypeTime:
+ return "TIME"
+ case fieldTypeTimestamp:
+ return "TIMESTAMP"
+ case fieldTypeTiny:
+ return "TINYINT"
+ case fieldTypeTinyBLOB:
+ if mf.charSet != collations[binaryCollation] {
+ return "TINYTEXT"
+ }
+ return "TINYBLOB"
+ case fieldTypeVarChar:
+ if mf.charSet == collations[binaryCollation] {
+ return "VARBINARY"
+ }
+ return "VARCHAR"
+ case fieldTypeVarString:
+ if mf.charSet == collations[binaryCollation] {
+ return "VARBINARY"
+ }
+ return "VARCHAR"
+ case fieldTypeYear:
+ return "YEAR"
+ default:
+ return ""
+ }
+}
+
+var (
+ scanTypeFloat32 = reflect.TypeOf(float32(0))
+ scanTypeFloat64 = reflect.TypeOf(float64(0))
+ scanTypeInt8 = reflect.TypeOf(int8(0))
+ scanTypeInt16 = reflect.TypeOf(int16(0))
+ scanTypeInt32 = reflect.TypeOf(int32(0))
+ scanTypeInt64 = reflect.TypeOf(int64(0))
+ scanTypeNullFloat = reflect.TypeOf(sql.NullFloat64{})
+ scanTypeNullInt = reflect.TypeOf(sql.NullInt64{})
+ scanTypeNullTime = reflect.TypeOf(nullTime{})
+ scanTypeUint8 = reflect.TypeOf(uint8(0))
+ scanTypeUint16 = reflect.TypeOf(uint16(0))
+ scanTypeUint32 = reflect.TypeOf(uint32(0))
+ scanTypeUint64 = reflect.TypeOf(uint64(0))
+ scanTypeRawBytes = reflect.TypeOf(sql.RawBytes{})
+ scanTypeUnknown = reflect.TypeOf(new(interface{}))
+)
+
+type mysqlField struct {
+ tableName string
+ name string
+ length uint32
+ flags fieldFlag
+ fieldType fieldType
+ decimals byte
+ charSet uint8
+}
+
+func (mf *mysqlField) scanType() reflect.Type {
+ switch mf.fieldType {
+ case fieldTypeTiny:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint8
+ }
+ return scanTypeInt8
+ }
+ return scanTypeNullInt
+
+ case fieldTypeShort, fieldTypeYear:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint16
+ }
+ return scanTypeInt16
+ }
+ return scanTypeNullInt
+
+ case fieldTypeInt24, fieldTypeLong:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint32
+ }
+ return scanTypeInt32
+ }
+ return scanTypeNullInt
+
+ case fieldTypeLongLong:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint64
+ }
+ return scanTypeInt64
+ }
+ return scanTypeNullInt
+
+ case fieldTypeFloat:
+ if mf.flags&flagNotNULL != 0 {
+ return scanTypeFloat32
+ }
+ return scanTypeNullFloat
+
+ case fieldTypeDouble:
+ if mf.flags&flagNotNULL != 0 {
+ return scanTypeFloat64
+ }
+ return scanTypeNullFloat
+
+ case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
+ fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
+ fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
+ fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON,
+ fieldTypeTime:
+ return scanTypeRawBytes
+
+ case fieldTypeDate, fieldTypeNewDate,
+ fieldTypeTimestamp, fieldTypeDateTime:
+ // NullTime is always returned for more consistent behavior, as it can
+ // handle both parseTime settings regardless of whether the field is nullable.
+ return scanTypeNullTime
+
+ default:
+ return scanTypeUnknown
+ }
+}
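typeDatabaseName and scanType feed the column metadata exposed through the standard (*sql.Rows).ColumnTypes API, which is useful for generic tooling. A minimal sketch, assuming db is an open *sql.DB; the query and column names are placeholders:

package example

import (
	"database/sql"
	"fmt"
	"log"
)

func describeColumns(db *sql.DB) {
	rows, err := db.Query("SELECT id, created_at, note FROM foo LIMIT 1")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	cols, err := rows.ColumnTypes()
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range cols {
		// DatabaseTypeName and ScanType are derived from the field metadata above.
		fmt.Println(c.Name(), c.DatabaseTypeName(), c.ScanType())
	}
}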
diff --git a/vendor/github.com/go-sql-driver/mysql/fuzz.go b/vendor/github.com/go-sql-driver/mysql/fuzz.go
new file mode 100644
index 00000000..fa75adf6
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/fuzz.go
@@ -0,0 +1,24 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package.
+//
+// Copyright 2020 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build gofuzz
+
+package mysql
+
+import (
+ "database/sql"
+)
+
+func Fuzz(data []byte) int {
+ db, err := sql.Open("mysql", string(data))
+ if err != nil {
+ return 0
+ }
+ db.Close()
+ return 1
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/infile.go b/vendor/github.com/go-sql-driver/mysql/infile.go
new file mode 100644
index 00000000..60effdfc
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/infile.go
@@ -0,0 +1,182 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "sync"
+)
+
+var (
+ fileRegister map[string]bool
+ fileRegisterLock sync.RWMutex
+ readerRegister map[string]func() io.Reader
+ readerRegisterLock sync.RWMutex
+)
+
+// RegisterLocalFile adds the given file to the file allowlist,
+// so that it can be used by "LOAD DATA LOCAL INFILE <filepath>".
+// Alternatively you can allow the use of all local files with
+// the DSN parameter 'allowAllFiles=true'
+//
+// filePath := "/home/gopher/data.csv"
+// mysql.RegisterLocalFile(filePath)
+// err := db.Exec("LOAD DATA LOCAL INFILE '" + filePath + "' INTO TABLE foo")
+// if err != nil {
+// ...
+//
+func RegisterLocalFile(filePath string) {
+ fileRegisterLock.Lock()
+ // lazy map init
+ if fileRegister == nil {
+ fileRegister = make(map[string]bool)
+ }
+
+ fileRegister[strings.Trim(filePath, `"`)] = true
+ fileRegisterLock.Unlock()
+}
+
+// DeregisterLocalFile removes the given filepath from the allowlist.
+func DeregisterLocalFile(filePath string) {
+ fileRegisterLock.Lock()
+ delete(fileRegister, strings.Trim(filePath, `"`))
+ fileRegisterLock.Unlock()
+}
+
+// RegisterReaderHandler registers a handler function which is used
+// to receive a io.Reader.
+// The Reader can be used by "LOAD DATA LOCAL INFILE Reader::<name>".
+// If the handler returns a io.ReadCloser Close() is called when the
+// request is finished.
+//
+// mysql.RegisterReaderHandler("data", func() io.Reader {
+// var csvReader io.Reader // Some Reader that returns CSV data
+// ... // Open Reader here
+// return csvReader
+// })
+// err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE foo")
+// if err != nil {
+// ...
+//
+func RegisterReaderHandler(name string, handler func() io.Reader) {
+ readerRegisterLock.Lock()
+ // lazy map init
+ if readerRegister == nil {
+ readerRegister = make(map[string]func() io.Reader)
+ }
+
+ readerRegister[name] = handler
+ readerRegisterLock.Unlock()
+}
+
+// DeregisterReaderHandler removes the ReaderHandler function with
+// the given name from the registry.
+func DeregisterReaderHandler(name string) {
+ readerRegisterLock.Lock()
+ delete(readerRegister, name)
+ readerRegisterLock.Unlock()
+}
+
+func deferredClose(err *error, closer io.Closer) {
+ closeErr := closer.Close()
+ if *err == nil {
+ *err = closeErr
+ }
+}
+
+func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
+ var rdr io.Reader
+ var data []byte
+ packetSize := 16 * 1024 // 16KB is small enough for disk readahead and large enough for TCP
+ if mc.maxWriteSize < packetSize {
+ packetSize = mc.maxWriteSize
+ }
+
+ if idx := strings.Index(name, "Reader::"); idx == 0 || (idx > 0 && name[idx-1] == '/') { // io.Reader
+ // The server might return an absolute path. See issue #355.
+ name = name[idx+8:]
+
+ readerRegisterLock.RLock()
+ handler, inMap := readerRegister[name]
+ readerRegisterLock.RUnlock()
+
+ if inMap {
+ rdr = handler()
+ if rdr != nil {
+ if cl, ok := rdr.(io.Closer); ok {
+ defer deferredClose(&err, cl)
+ }
+ } else {
+ err = fmt.Errorf("Reader '%s' is ", name)
+ }
+ } else {
+ err = fmt.Errorf("Reader '%s' is not registered", name)
+ }
+ } else { // File
+ name = strings.Trim(name, `"`)
+ fileRegisterLock.RLock()
+ fr := fileRegister[name]
+ fileRegisterLock.RUnlock()
+ if mc.cfg.AllowAllFiles || fr {
+ var file *os.File
+ var fi os.FileInfo
+
+ if file, err = os.Open(name); err == nil {
+ defer deferredClose(&err, file)
+
+ // get file size
+ if fi, err = file.Stat(); err == nil {
+ rdr = file
+ if fileSize := int(fi.Size()); fileSize < packetSize {
+ packetSize = fileSize
+ }
+ }
+ }
+ } else {
+ err = fmt.Errorf("local file '%s' is not registered", name)
+ }
+ }
+
+ // send content packets
+ // if packetSize == 0, the Reader contains no data
+ if err == nil && packetSize > 0 {
+ data := make([]byte, 4+packetSize)
+ var n int
+ for err == nil {
+ n, err = rdr.Read(data[4:])
+ if n > 0 {
+ if ioErr := mc.writePacket(data[:4+n]); ioErr != nil {
+ return ioErr
+ }
+ }
+ }
+ if err == io.EOF {
+ err = nil
+ }
+ }
+
+ // send empty packet (termination)
+ if data == nil {
+ data = make([]byte, 4)
+ }
+ if ioErr := mc.writePacket(data[:4]); ioErr != nil {
+ return ioErr
+ }
+
+ // read OK packet
+ if err == nil {
+ return mc.readResultOK()
+ }
+
+ mc.readPacket()
+ return err
+}
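handleInFileRequest serves LOAD DATA LOCAL INFILE either from allowlisted files or from registered reader handlers. A minimal sketch that streams in-memory CSV data through a registered reader; the handler name, table and DSN are placeholders:

package main

import (
	"bytes"
	"database/sql"
	"io"
	"log"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// Data served under 'Reader::mydata' in the LOAD DATA statement below.
	mysql.RegisterReaderHandler("mydata", func() io.Reader {
		return bytes.NewReader([]byte("1,foo\n2,bar\n"))
	})
	defer mysql.DeregisterReaderHandler("mydata")

	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/dbname")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::mydata' INTO TABLE foo"); err != nil {
		log.Fatal(err)
	}
}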
diff --git a/vendor/github.com/go-sql-driver/mysql/nulltime.go b/vendor/github.com/go-sql-driver/mysql/nulltime.go
new file mode 100644
index 00000000..651723a9
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/nulltime.go
@@ -0,0 +1,50 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "database/sql/driver"
+ "fmt"
+ "time"
+)
+
+// Scan implements the Scanner interface.
+// The value type must be time.Time or string / []byte (formatted time-string),
+// otherwise Scan fails.
+func (nt *NullTime) Scan(value interface{}) (err error) {
+ if value == nil {
+ nt.Time, nt.Valid = time.Time{}, false
+ return
+ }
+
+ switch v := value.(type) {
+ case time.Time:
+ nt.Time, nt.Valid = v, true
+ return
+ case []byte:
+ nt.Time, err = parseDateTime(v, time.UTC)
+ nt.Valid = (err == nil)
+ return
+ case string:
+ nt.Time, err = parseDateTime([]byte(v), time.UTC)
+ nt.Valid = (err == nil)
+ return
+ }
+
+ nt.Valid = false
+ return fmt.Errorf("Can't convert %T to time.Time", value)
+}
+
+// Value implements the driver Valuer interface.
+func (nt NullTime) Value() (driver.Value, error) {
+ if !nt.Valid {
+ return nil, nil
+ }
+ return nt.Time, nil
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/nulltime_go113.go b/vendor/github.com/go-sql-driver/mysql/nulltime_go113.go
new file mode 100644
index 00000000..453b4b39
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/nulltime_go113.go
@@ -0,0 +1,40 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build go1.13
+
+package mysql
+
+import (
+ "database/sql"
+)
+
+// NullTime represents a time.Time that may be NULL.
+// NullTime implements the Scanner interface so
+// it can be used as a scan destination:
+//
+// var nt NullTime
+// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt)
+// ...
+// if nt.Valid {
+// // use nt.Time
+// } else {
+// // NULL value
+// }
+//
+// This NullTime implementation is not driver-specific
+//
+// Deprecated: NullTime doesn't honor the loc DSN parameter.
+// NullTime.Scan interprets a time as UTC, not the loc DSN parameter.
+// Use sql.NullTime instead.
+type NullTime sql.NullTime
+
+// for internal use.
+// the mysql package uses sql.NullTime if it is available.
+// if not, the package uses mysql.NullTime.
+type nullTime = sql.NullTime // sql.NullTime is available
diff --git a/vendor/github.com/go-sql-driver/mysql/nulltime_legacy.go b/vendor/github.com/go-sql-driver/mysql/nulltime_legacy.go
new file mode 100644
index 00000000..9f7ae27a
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/nulltime_legacy.go
@@ -0,0 +1,39 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build !go1.13
+
+package mysql
+
+import (
+ "time"
+)
+
+// NullTime represents a time.Time that may be NULL.
+// NullTime implements the Scanner interface so
+// it can be used as a scan destination:
+//
+// var nt NullTime
+// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt)
+// ...
+// if nt.Valid {
+// // use nt.Time
+// } else {
+// // NULL value
+// }
+//
+// This NullTime implementation is not driver-specific
+type NullTime struct {
+ Time time.Time
+ Valid bool // Valid is true if Time is not NULL
+}
+
+// for internal use.
+// the mysql package uses sql.NullTime if it is available.
+// if not, the package uses mysql.NullTime.
+type nullTime = NullTime // sql.NullTime is not available
diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go
new file mode 100644
index 00000000..6664e5ae
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/packets.go
@@ -0,0 +1,1349 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "bytes"
+ "crypto/tls"
+ "database/sql/driver"
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "time"
+)
+
+// Packets documentation:
+// http://dev.mysql.com/doc/internals/en/client-server-protocol.html
+
+// Read packet to buffer 'data'
+func (mc *mysqlConn) readPacket() ([]byte, error) {
+ var prevData []byte
+ for {
+ // read packet header
+ data, err := mc.buf.readNext(4)
+ if err != nil {
+ if cerr := mc.canceled.Value(); cerr != nil {
+ return nil, cerr
+ }
+ errLog.Print(err)
+ mc.Close()
+ return nil, ErrInvalidConn
+ }
+
+ // packet length [24 bit]
+ pktLen := int(uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16)
+
+ // check packet sync [8 bit]
+ if data[3] != mc.sequence {
+ if data[3] > mc.sequence {
+ return nil, ErrPktSyncMul
+ }
+ return nil, ErrPktSync
+ }
+ mc.sequence++
+
+ // packets with length 0 terminate a previous packet which is a
+ // multiple of (2^24)-1 bytes long
+ if pktLen == 0 {
+ // there was no previous packet
+ if prevData == nil {
+ errLog.Print(ErrMalformPkt)
+ mc.Close()
+ return nil, ErrInvalidConn
+ }
+
+ return prevData, nil
+ }
+
+ // read packet body [pktLen bytes]
+ data, err = mc.buf.readNext(pktLen)
+ if err != nil {
+ if cerr := mc.canceled.Value(); cerr != nil {
+ return nil, cerr
+ }
+ errLog.Print(err)
+ mc.Close()
+ return nil, ErrInvalidConn
+ }
+
+ // return data if this was the last packet
+ if pktLen < maxPacketSize {
+ // zero allocations for non-split packets
+ if prevData == nil {
+ return data, nil
+ }
+
+ return append(prevData, data...), nil
+ }
+
+ prevData = append(prevData, data...)
+ }
+}
+
+// Write packet buffer 'data'
+func (mc *mysqlConn) writePacket(data []byte) error {
+ pktLen := len(data) - 4
+
+ if pktLen > mc.maxAllowedPacket {
+ return ErrPktTooLarge
+ }
+
+ // Perform a stale connection check. We only perform this check for
+ // the first query on a connection that has been checked out of the
+ // connection pool: a fresh connection from the pool is more likely
+ // to be stale, and it has not performed any previous writes that
+ // could cause data corruption, so it's safe to return ErrBadConn
+ // if the check fails.
+ if mc.reset {
+ mc.reset = false
+ conn := mc.netConn
+ if mc.rawConn != nil {
+ conn = mc.rawConn
+ }
+ var err error
+ // If this connection has a ReadTimeout which we've been setting on
+ // reads, reset it to its default value before we attempt a non-blocking
+ // read, otherwise the scheduler will just time us out before we can read
+ if mc.cfg.ReadTimeout != 0 {
+ err = conn.SetReadDeadline(time.Time{})
+ }
+ if err == nil && mc.cfg.CheckConnLiveness {
+ err = connCheck(conn)
+ }
+ if err != nil {
+ errLog.Print("closing bad idle connection: ", err)
+ mc.Close()
+ return driver.ErrBadConn
+ }
+ }
+
+ for {
+ var size int
+ if pktLen >= maxPacketSize {
+ data[0] = 0xff
+ data[1] = 0xff
+ data[2] = 0xff
+ size = maxPacketSize
+ } else {
+ data[0] = byte(pktLen)
+ data[1] = byte(pktLen >> 8)
+ data[2] = byte(pktLen >> 16)
+ size = pktLen
+ }
+ data[3] = mc.sequence
+
+ // Write packet
+ if mc.writeTimeout > 0 {
+ if err := mc.netConn.SetWriteDeadline(time.Now().Add(mc.writeTimeout)); err != nil {
+ return err
+ }
+ }
+
+ n, err := mc.netConn.Write(data[:4+size])
+ if err == nil && n == 4+size {
+ mc.sequence++
+ if size != maxPacketSize {
+ return nil
+ }
+ pktLen -= size
+ data = data[size:]
+ continue
+ }
+
+ // Handle error
+ if err == nil { // n != len(data)
+ mc.cleanup()
+ errLog.Print(ErrMalformPkt)
+ } else {
+ if cerr := mc.canceled.Value(); cerr != nil {
+ return cerr
+ }
+ if n == 0 && pktLen == len(data)-4 {
+ // only for the first loop iteration when nothing was written yet
+ return errBadConnNoWrite
+ }
+ mc.cleanup()
+ errLog.Print(err)
+ }
+ return ErrInvalidConn
+ }
+}
+
+/******************************************************************************
+* Initialization Process *
+******************************************************************************/
+
+// Handshake Initialization Packet
+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake
+func (mc *mysqlConn) readHandshakePacket() (data []byte, plugin string, err error) {
+ data, err = mc.readPacket()
+ if err != nil {
+ // for init we can rewrite this to ErrBadConn for sql.Driver to retry, since
+ // in connection initialization we don't risk retrying non-idempotent actions.
+ if err == ErrInvalidConn {
+ return nil, "", driver.ErrBadConn
+ }
+ return
+ }
+
+ if data[0] == iERR {
+ return nil, "", mc.handleErrorPacket(data)
+ }
+
+ // protocol version [1 byte]
+ if data[0] < minProtocolVersion {
+ return nil, "", fmt.Errorf(
+ "unsupported protocol version %d. Version %d or higher is required",
+ data[0],
+ minProtocolVersion,
+ )
+ }
+
+ // server version [null terminated string]
+ // connection id [4 bytes]
+ pos := 1 + bytes.IndexByte(data[1:], 0x00) + 1 + 4
+
+ // first part of the password cipher [8 bytes]
+ authData := data[pos : pos+8]
+
+ // (filler) always 0x00 [1 byte]
+ pos += 8 + 1
+
+ // capability flags (lower 2 bytes) [2 bytes]
+ mc.flags = clientFlag(binary.LittleEndian.Uint16(data[pos : pos+2]))
+ if mc.flags&clientProtocol41 == 0 {
+ return nil, "", ErrOldProtocol
+ }
+ if mc.flags&clientSSL == 0 && mc.cfg.tls != nil {
+ if mc.cfg.TLSConfig == "preferred" {
+ mc.cfg.tls = nil
+ } else {
+ return nil, "", ErrNoTLS
+ }
+ }
+ pos += 2
+
+ if len(data) > pos {
+ // character set [1 byte]
+ // status flags [2 bytes]
+ // capability flags (upper 2 bytes) [2 bytes]
+ // length of auth-plugin-data [1 byte]
+ // reserved (all [00]) [10 bytes]
+ pos += 1 + 2 + 2 + 1 + 10
+
+		// second part of the password cipher [minimum 13 bytes],
+ // where len=MAX(13, length of auth-plugin-data - 8)
+ //
+ // The web documentation is ambiguous about the length. However,
+ // according to mysql-5.7/sql/auth/sql_authentication.cc line 538,
+ // the 13th byte is "\0 byte, terminating the second part of
+ // a scramble". So the second part of the password cipher is
+ // a NULL terminated string that's at least 13 bytes with the
+ // last byte being NULL.
+ //
+ // The official Python library uses the fixed length 12
+ // which seems to work but technically could have a hidden bug.
+ authData = append(authData, data[pos:pos+12]...)
+ pos += 13
+
+ // EOF if version (>= 5.5.7 and < 5.5.10) or (>= 5.6.0 and < 5.6.2)
+ // \NUL otherwise
+ if end := bytes.IndexByte(data[pos:], 0x00); end != -1 {
+ plugin = string(data[pos : pos+end])
+ } else {
+ plugin = string(data[pos:])
+ }
+
+ // make a memory safe copy of the cipher slice
+ var b [20]byte
+ copy(b[:], authData)
+ return b[:], plugin, nil
+ }
+
+ // make a memory safe copy of the cipher slice
+ var b [8]byte
+ copy(b[:], authData)
+ return b[:], plugin, nil
+}
+
+// Client Authentication Packet
+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse
+func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string) error {
+ // Adjust client flags based on server support
+ clientFlags := clientProtocol41 |
+ clientSecureConn |
+ clientLongPassword |
+ clientTransactions |
+ clientLocalFiles |
+ clientPluginAuth |
+ clientMultiResults |
+ mc.flags&clientLongFlag
+
+ if mc.cfg.ClientFoundRows {
+ clientFlags |= clientFoundRows
+ }
+
+ // To enable TLS / SSL
+ if mc.cfg.tls != nil {
+ clientFlags |= clientSSL
+ }
+
+ if mc.cfg.MultiStatements {
+ clientFlags |= clientMultiStatements
+ }
+
+ // encode length of the auth plugin data
+ var authRespLEIBuf [9]byte
+ authRespLen := len(authResp)
+ authRespLEI := appendLengthEncodedInteger(authRespLEIBuf[:0], uint64(authRespLen))
+ if len(authRespLEI) > 1 {
+ // if the length can not be written in 1 byte, it must be written as a
+ // length encoded integer
+ clientFlags |= clientPluginAuthLenEncClientData
+ }
+
+ pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1 + len(authRespLEI) + len(authResp) + 21 + 1
+
+ // To specify a db name
+ if n := len(mc.cfg.DBName); n > 0 {
+ clientFlags |= clientConnectWithDB
+ pktLen += n + 1
+ }
+
+ // Calculate packet length and get buffer with that size
+ data, err := mc.buf.takeSmallBuffer(pktLen + 4)
+ if err != nil {
+ // cannot take the buffer. Something must be wrong with the connection
+ errLog.Print(err)
+ return errBadConnNoWrite
+ }
+
+ // ClientFlags [32 bit]
+ data[4] = byte(clientFlags)
+ data[5] = byte(clientFlags >> 8)
+ data[6] = byte(clientFlags >> 16)
+ data[7] = byte(clientFlags >> 24)
+
+ // MaxPacketSize [32 bit] (none)
+ data[8] = 0x00
+ data[9] = 0x00
+ data[10] = 0x00
+ data[11] = 0x00
+
+ // Charset [1 byte]
+ var found bool
+ data[12], found = collations[mc.cfg.Collation]
+ if !found {
+ // Note possibility for false negatives:
+ // could be triggered although the collation is valid if the
+ // collations map does not contain entries the server supports.
+ return errors.New("unknown collation")
+ }
+
+ // Filler [23 bytes] (all 0x00)
+ pos := 13
+ for ; pos < 13+23; pos++ {
+ data[pos] = 0
+ }
+
+ // SSL Connection Request Packet
+ // http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::SSLRequest
+ if mc.cfg.tls != nil {
+ // Send TLS / SSL request packet
+ if err := mc.writePacket(data[:(4+4+1+23)+4]); err != nil {
+ return err
+ }
+
+ // Switch to TLS
+ tlsConn := tls.Client(mc.netConn, mc.cfg.tls)
+ if err := tlsConn.Handshake(); err != nil {
+ return err
+ }
+ mc.rawConn = mc.netConn
+ mc.netConn = tlsConn
+ mc.buf.nc = tlsConn
+ }
+
+ // User [null terminated string]
+ if len(mc.cfg.User) > 0 {
+ pos += copy(data[pos:], mc.cfg.User)
+ }
+ data[pos] = 0x00
+ pos++
+
+ // Auth Data [length encoded integer]
+ pos += copy(data[pos:], authRespLEI)
+ pos += copy(data[pos:], authResp)
+
+	// Database name [null terminated string]
+ if len(mc.cfg.DBName) > 0 {
+ pos += copy(data[pos:], mc.cfg.DBName)
+ data[pos] = 0x00
+ pos++
+ }
+
+ pos += copy(data[pos:], plugin)
+ data[pos] = 0x00
+ pos++
+
+ // Send Auth packet
+ return mc.writePacket(data[:pos])
+}
+
+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
+func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte) error {
+ pktLen := 4 + len(authData)
+ data, err := mc.buf.takeSmallBuffer(pktLen)
+ if err != nil {
+ // cannot take the buffer. Something must be wrong with the connection
+ errLog.Print(err)
+ return errBadConnNoWrite
+ }
+
+ // Add the auth data [EOF]
+ copy(data[4:], authData)
+ return mc.writePacket(data)
+}
+
+/******************************************************************************
+* Command Packets *
+******************************************************************************/
+
+func (mc *mysqlConn) writeCommandPacket(command byte) error {
+ // Reset Packet Sequence
+ mc.sequence = 0
+
+ data, err := mc.buf.takeSmallBuffer(4 + 1)
+ if err != nil {
+ // cannot take the buffer. Something must be wrong with the connection
+ errLog.Print(err)
+ return errBadConnNoWrite
+ }
+
+ // Add command byte
+ data[4] = command
+
+ // Send CMD packet
+ return mc.writePacket(data)
+}
+
+func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error {
+ // Reset Packet Sequence
+ mc.sequence = 0
+
+ pktLen := 1 + len(arg)
+ data, err := mc.buf.takeBuffer(pktLen + 4)
+ if err != nil {
+ // cannot take the buffer. Something must be wrong with the connection
+ errLog.Print(err)
+ return errBadConnNoWrite
+ }
+
+ // Add command byte
+ data[4] = command
+
+ // Add arg
+ copy(data[5:], arg)
+
+ // Send CMD packet
+ return mc.writePacket(data)
+}
+
+func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error {
+ // Reset Packet Sequence
+ mc.sequence = 0
+
+ data, err := mc.buf.takeSmallBuffer(4 + 1 + 4)
+ if err != nil {
+ // cannot take the buffer. Something must be wrong with the connection
+ errLog.Print(err)
+ return errBadConnNoWrite
+ }
+
+ // Add command byte
+ data[4] = command
+
+ // Add arg [32 bit]
+ data[5] = byte(arg)
+ data[6] = byte(arg >> 8)
+ data[7] = byte(arg >> 16)
+ data[8] = byte(arg >> 24)
+
+ // Send CMD packet
+ return mc.writePacket(data)
+}
+
+/******************************************************************************
+* Result Packets *
+******************************************************************************/
+
+func (mc *mysqlConn) readAuthResult() ([]byte, string, error) {
+ data, err := mc.readPacket()
+ if err != nil {
+ return nil, "", err
+ }
+
+ // packet indicator
+ switch data[0] {
+
+ case iOK:
+ return nil, "", mc.handleOkPacket(data)
+
+ case iAuthMoreData:
+ return data[1:], "", err
+
+ case iEOF:
+ if len(data) == 1 {
+ // https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::OldAuthSwitchRequest
+ return nil, "mysql_old_password", nil
+ }
+ pluginEndIndex := bytes.IndexByte(data, 0x00)
+ if pluginEndIndex < 0 {
+ return nil, "", ErrMalformPkt
+ }
+ plugin := string(data[1:pluginEndIndex])
+ authData := data[pluginEndIndex+1:]
+ return authData, plugin, nil
+
+ default: // Error otherwise
+ return nil, "", mc.handleErrorPacket(data)
+ }
+}
+
+// Returns an error if the packet is not a 'Result OK'-Packet
+func (mc *mysqlConn) readResultOK() error {
+ data, err := mc.readPacket()
+ if err != nil {
+ return err
+ }
+
+ if data[0] == iOK {
+ return mc.handleOkPacket(data)
+ }
+ return mc.handleErrorPacket(data)
+}
+
+// Result Set Header Packet
+// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset
+func (mc *mysqlConn) readResultSetHeaderPacket() (int, error) {
+ data, err := mc.readPacket()
+ if err == nil {
+ switch data[0] {
+
+ case iOK:
+ return 0, mc.handleOkPacket(data)
+
+ case iERR:
+ return 0, mc.handleErrorPacket(data)
+
+ case iLocalInFile:
+ return 0, mc.handleInFileRequest(string(data[1:]))
+ }
+
+ // column count
+ num, _, n := readLengthEncodedInteger(data)
+ if n-len(data) == 0 {
+ return int(num), nil
+ }
+
+ return 0, ErrMalformPkt
+ }
+ return 0, err
+}
+
+// Error Packet
+// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-ERR_Packet
+func (mc *mysqlConn) handleErrorPacket(data []byte) error {
+ if data[0] != iERR {
+ return ErrMalformPkt
+ }
+
+ // 0xff [1 byte]
+
+ // Error Number [16 bit uint]
+ errno := binary.LittleEndian.Uint16(data[1:3])
+
+ // 1792: ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION
+ // 1290: ER_OPTION_PREVENTS_STATEMENT (returned by Aurora during failover)
+ if (errno == 1792 || errno == 1290) && mc.cfg.RejectReadOnly {
+		// Oops; this is a read-only connection and it won't accept any
+		// write statements. Since RejectReadOnly is configured, we throw
+		// away this connection and hope that the next one will have write
+		// permission. This is specifically for a possible race condition
+ // during failover (e.g. on AWS Aurora). See README.md for more.
+ //
+ // We explicitly close the connection before returning
+ // driver.ErrBadConn to ensure that `database/sql` purges this
+ // connection and initiates a new one for next statement next time.
+ mc.Close()
+ return driver.ErrBadConn
+ }
+
+ pos := 3
+
+ // SQL State [optional: # + 5bytes string]
+ if data[3] == 0x23 {
+ //sqlstate := string(data[4 : 4+5])
+ pos = 9
+ }
+
+ // Error Message [string]
+ return &MySQLError{
+ Number: errno,
+ Message: string(data[pos:]),
+ }
+}
+
+func readStatus(b []byte) statusFlag {
+ return statusFlag(b[0]) | statusFlag(b[1])<<8
+}
+
+// Ok Packet
+// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-OK_Packet
+func (mc *mysqlConn) handleOkPacket(data []byte) error {
+ var n, m int
+
+ // 0x00 [1 byte]
+
+ // Affected rows [Length Coded Binary]
+ mc.affectedRows, _, n = readLengthEncodedInteger(data[1:])
+
+ // Insert id [Length Coded Binary]
+ mc.insertId, _, m = readLengthEncodedInteger(data[1+n:])
+
+ // server_status [2 bytes]
+ mc.status = readStatus(data[1+n+m : 1+n+m+2])
+ if mc.status&statusMoreResultsExists != 0 {
+ return nil
+ }
+
+ // warning count [2 bytes]
+
+ return nil
+}
+
+// Read Packets as Field Packets until EOF-Packet or an Error appears
+// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnDefinition41
+func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) {
+ columns := make([]mysqlField, count)
+
+ for i := 0; ; i++ {
+ data, err := mc.readPacket()
+ if err != nil {
+ return nil, err
+ }
+
+ // EOF Packet
+ if data[0] == iEOF && (len(data) == 5 || len(data) == 1) {
+ if i == count {
+ return columns, nil
+ }
+ return nil, fmt.Errorf("column count mismatch n:%d len:%d", count, len(columns))
+ }
+
+ // Catalog
+ pos, err := skipLengthEncodedString(data)
+ if err != nil {
+ return nil, err
+ }
+
+ // Database [len coded string]
+ n, err := skipLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += n
+
+ // Table [len coded string]
+ if mc.cfg.ColumnsWithAlias {
+ tableName, _, n, err := readLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += n
+ columns[i].tableName = string(tableName)
+ } else {
+ n, err = skipLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += n
+ }
+
+ // Original table [len coded string]
+ n, err = skipLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += n
+
+ // Name [len coded string]
+ name, _, n, err := readLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ columns[i].name = string(name)
+ pos += n
+
+ // Original name [len coded string]
+ n, err = skipLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += n
+
+ // Filler [uint8]
+ pos++
+
+ // Charset [charset, collation uint8]
+ columns[i].charSet = data[pos]
+ pos += 2
+
+ // Length [uint32]
+ columns[i].length = binary.LittleEndian.Uint32(data[pos : pos+4])
+ pos += 4
+
+ // Field type [uint8]
+ columns[i].fieldType = fieldType(data[pos])
+ pos++
+
+ // Flags [uint16]
+ columns[i].flags = fieldFlag(binary.LittleEndian.Uint16(data[pos : pos+2]))
+ pos += 2
+
+ // Decimals [uint8]
+ columns[i].decimals = data[pos]
+ //pos++
+
+ // Default value [len coded binary]
+ //if pos < len(data) {
+ // defaultVal, _, err = bytesToLengthCodedBinary(data[pos:])
+ //}
+ }
+}
+
+// Read one text Resultset Row Packet into dest; an EOF-Packet ends the result set
+// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::ResultsetRow
+func (rows *textRows) readRow(dest []driver.Value) error {
+ mc := rows.mc
+
+ if rows.rs.done {
+ return io.EOF
+ }
+
+ data, err := mc.readPacket()
+ if err != nil {
+ return err
+ }
+
+ // EOF Packet
+ if data[0] == iEOF && len(data) == 5 {
+ // server_status [2 bytes]
+ rows.mc.status = readStatus(data[3:])
+ rows.rs.done = true
+ if !rows.HasNextResultSet() {
+ rows.mc = nil
+ }
+ return io.EOF
+ }
+ if data[0] == iERR {
+ rows.mc = nil
+ return mc.handleErrorPacket(data)
+ }
+
+ // RowSet Packet
+ var n int
+ var isNull bool
+ pos := 0
+
+ for i := range dest {
+ // Read bytes and convert to string
+ dest[i], isNull, n, err = readLengthEncodedString(data[pos:])
+ pos += n
+ if err == nil {
+ if !isNull {
+ if !mc.parseTime {
+ continue
+ } else {
+ switch rows.rs.columns[i].fieldType {
+ case fieldTypeTimestamp, fieldTypeDateTime,
+ fieldTypeDate, fieldTypeNewDate:
+ dest[i], err = parseDateTime(
+ dest[i].([]byte),
+ mc.cfg.Loc,
+ )
+ if err == nil {
+ continue
+ }
+ default:
+ continue
+ }
+ }
+
+ } else {
+ dest[i] = nil
+ continue
+ }
+ }
+ return err // err != nil
+ }
+
+ return nil
+}
+
+// Reads Packets until an EOF-Packet or an Error appears
+func (mc *mysqlConn) readUntilEOF() error {
+ for {
+ data, err := mc.readPacket()
+ if err != nil {
+ return err
+ }
+
+ switch data[0] {
+ case iERR:
+ return mc.handleErrorPacket(data)
+ case iEOF:
+ if len(data) == 5 {
+ mc.status = readStatus(data[3:])
+ }
+ return nil
+ }
+ }
+}
+
+/******************************************************************************
+* Prepared Statements *
+******************************************************************************/
+
+// Prepare Result Packets
+// http://dev.mysql.com/doc/internals/en/com-stmt-prepare-response.html
+func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) {
+ data, err := stmt.mc.readPacket()
+ if err == nil {
+ // packet indicator [1 byte]
+ if data[0] != iOK {
+ return 0, stmt.mc.handleErrorPacket(data)
+ }
+
+ // statement id [4 bytes]
+ stmt.id = binary.LittleEndian.Uint32(data[1:5])
+
+ // Column count [16 bit uint]
+ columnCount := binary.LittleEndian.Uint16(data[5:7])
+
+ // Param count [16 bit uint]
+ stmt.paramCount = int(binary.LittleEndian.Uint16(data[7:9]))
+
+ // Reserved [8 bit]
+
+ // Warning count [16 bit uint]
+
+ return columnCount, nil
+ }
+ return 0, err
+}
+
+// http://dev.mysql.com/doc/internals/en/com-stmt-send-long-data.html
+func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error {
+ maxLen := stmt.mc.maxAllowedPacket - 1
+ pktLen := maxLen
+
+	// Between the packet header (bytes 0-3) and the data, each packet carries:
+ // 1 byte command
+ // 4 bytes stmtID
+ // 2 bytes paramID
+ const dataOffset = 1 + 4 + 2
+
+ // Cannot use the write buffer since
+ // a) the buffer is too small
+ // b) it is in use
+ data := make([]byte, 4+1+4+2+len(arg))
+
+ copy(data[4+dataOffset:], arg)
+
+ for argLen := len(arg); argLen > 0; argLen -= pktLen - dataOffset {
+ if dataOffset+argLen < maxLen {
+ pktLen = dataOffset + argLen
+ }
+
+ stmt.mc.sequence = 0
+ // Add command byte [1 byte]
+ data[4] = comStmtSendLongData
+
+ // Add stmtID [32 bit]
+ data[5] = byte(stmt.id)
+ data[6] = byte(stmt.id >> 8)
+ data[7] = byte(stmt.id >> 16)
+ data[8] = byte(stmt.id >> 24)
+
+ // Add paramID [16 bit]
+ data[9] = byte(paramID)
+ data[10] = byte(paramID >> 8)
+
+ // Send CMD packet
+ err := stmt.mc.writePacket(data[:4+pktLen])
+ if err == nil {
+ data = data[pktLen-dataOffset:]
+ continue
+ }
+ return err
+
+ }
+
+ // Reset Packet Sequence
+ stmt.mc.sequence = 0
+ return nil
+}
+
+// Execute Prepared Statement
+// http://dev.mysql.com/doc/internals/en/com-stmt-execute.html
+func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
+ if len(args) != stmt.paramCount {
+ return fmt.Errorf(
+ "argument count mismatch (got: %d; has: %d)",
+ len(args),
+ stmt.paramCount,
+ )
+ }
+
+ const minPktLen = 4 + 1 + 4 + 1 + 4
+ mc := stmt.mc
+
+ // Determine threshold dynamically to avoid packet size shortage.
+ longDataSize := mc.maxAllowedPacket / (stmt.paramCount + 1)
+ if longDataSize < 64 {
+ longDataSize = 64
+ }
+
+ // Reset packet-sequence
+ mc.sequence = 0
+
+ var data []byte
+ var err error
+
+ if len(args) == 0 {
+ data, err = mc.buf.takeBuffer(minPktLen)
+ } else {
+ data, err = mc.buf.takeCompleteBuffer()
+ // In this case the len(data) == cap(data) which is used to optimise the flow below.
+ }
+ if err != nil {
+ // cannot take the buffer. Something must be wrong with the connection
+ errLog.Print(err)
+ return errBadConnNoWrite
+ }
+
+ // command [1 byte]
+ data[4] = comStmtExecute
+
+ // statement_id [4 bytes]
+ data[5] = byte(stmt.id)
+ data[6] = byte(stmt.id >> 8)
+ data[7] = byte(stmt.id >> 16)
+ data[8] = byte(stmt.id >> 24)
+
+ // flags (0: CURSOR_TYPE_NO_CURSOR) [1 byte]
+ data[9] = 0x00
+
+ // iteration_count (uint32(1)) [4 bytes]
+ data[10] = 0x01
+ data[11] = 0x00
+ data[12] = 0x00
+ data[13] = 0x00
+
+ if len(args) > 0 {
+ pos := minPktLen
+
+ var nullMask []byte
+ if maskLen, typesLen := (len(args)+7)/8, 1+2*len(args); pos+maskLen+typesLen >= cap(data) {
+			// The buffer has to be extended, but we don't know by how much, so
+			// we only grow it enough to hold the parts with known sizes and
+			// rely on append for the parameter values, whose total size is
+			// hard to guess up front.
+ tmp := make([]byte, pos+maskLen+typesLen)
+ copy(tmp[:pos], data[:pos])
+ data = tmp
+ nullMask = data[pos : pos+maskLen]
+ // No need to clean nullMask as make ensures that.
+ pos += maskLen
+ } else {
+ nullMask = data[pos : pos+maskLen]
+ for i := range nullMask {
+ nullMask[i] = 0
+ }
+ pos += maskLen
+ }
+
+ // newParameterBoundFlag 1 [1 byte]
+ data[pos] = 0x01
+ pos++
+
+ // type of each parameter [len(args)*2 bytes]
+ paramTypes := data[pos:]
+ pos += len(args) * 2
+
+ // value of each parameter [n bytes]
+ paramValues := data[pos:pos]
+ valuesCap := cap(paramValues)
+
+ for i, arg := range args {
+ // build NULL-bitmap
+ if arg == nil {
+ nullMask[i/8] |= 1 << (uint(i) & 7)
+ paramTypes[i+i] = byte(fieldTypeNULL)
+ paramTypes[i+i+1] = 0x00
+ continue
+ }
+
+ if v, ok := arg.(json.RawMessage); ok {
+ arg = []byte(v)
+ }
+ // cache types and values
+ switch v := arg.(type) {
+ case int64:
+ paramTypes[i+i] = byte(fieldTypeLongLong)
+ paramTypes[i+i+1] = 0x00
+
+ if cap(paramValues)-len(paramValues)-8 >= 0 {
+ paramValues = paramValues[:len(paramValues)+8]
+ binary.LittleEndian.PutUint64(
+ paramValues[len(paramValues)-8:],
+ uint64(v),
+ )
+ } else {
+ paramValues = append(paramValues,
+ uint64ToBytes(uint64(v))...,
+ )
+ }
+
+ case uint64:
+ paramTypes[i+i] = byte(fieldTypeLongLong)
+ paramTypes[i+i+1] = 0x80 // type is unsigned
+
+ if cap(paramValues)-len(paramValues)-8 >= 0 {
+ paramValues = paramValues[:len(paramValues)+8]
+ binary.LittleEndian.PutUint64(
+ paramValues[len(paramValues)-8:],
+ uint64(v),
+ )
+ } else {
+ paramValues = append(paramValues,
+ uint64ToBytes(uint64(v))...,
+ )
+ }
+
+ case float64:
+ paramTypes[i+i] = byte(fieldTypeDouble)
+ paramTypes[i+i+1] = 0x00
+
+ if cap(paramValues)-len(paramValues)-8 >= 0 {
+ paramValues = paramValues[:len(paramValues)+8]
+ binary.LittleEndian.PutUint64(
+ paramValues[len(paramValues)-8:],
+ math.Float64bits(v),
+ )
+ } else {
+ paramValues = append(paramValues,
+ uint64ToBytes(math.Float64bits(v))...,
+ )
+ }
+
+ case bool:
+ paramTypes[i+i] = byte(fieldTypeTiny)
+ paramTypes[i+i+1] = 0x00
+
+ if v {
+ paramValues = append(paramValues, 0x01)
+ } else {
+ paramValues = append(paramValues, 0x00)
+ }
+
+ case []byte:
+ // Common case (non-nil value) first
+ if v != nil {
+ paramTypes[i+i] = byte(fieldTypeString)
+ paramTypes[i+i+1] = 0x00
+
+ if len(v) < longDataSize {
+ paramValues = appendLengthEncodedInteger(paramValues,
+ uint64(len(v)),
+ )
+ paramValues = append(paramValues, v...)
+ } else {
+ if err := stmt.writeCommandLongData(i, v); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+
+ // Handle []byte(nil) as a NULL value
+ nullMask[i/8] |= 1 << (uint(i) & 7)
+ paramTypes[i+i] = byte(fieldTypeNULL)
+ paramTypes[i+i+1] = 0x00
+
+ case string:
+ paramTypes[i+i] = byte(fieldTypeString)
+ paramTypes[i+i+1] = 0x00
+
+ if len(v) < longDataSize {
+ paramValues = appendLengthEncodedInteger(paramValues,
+ uint64(len(v)),
+ )
+ paramValues = append(paramValues, v...)
+ } else {
+ if err := stmt.writeCommandLongData(i, []byte(v)); err != nil {
+ return err
+ }
+ }
+
+ case time.Time:
+ paramTypes[i+i] = byte(fieldTypeString)
+ paramTypes[i+i+1] = 0x00
+
+ var a [64]byte
+ var b = a[:0]
+
+ if v.IsZero() {
+ b = append(b, "0000-00-00"...)
+ } else {
+ b, err = appendDateTime(b, v.In(mc.cfg.Loc))
+ if err != nil {
+ return err
+ }
+ }
+
+ paramValues = appendLengthEncodedInteger(paramValues,
+ uint64(len(b)),
+ )
+ paramValues = append(paramValues, b...)
+
+ default:
+ return fmt.Errorf("cannot convert type: %T", arg)
+ }
+ }
+
+ // Check if param values exceeded the available buffer
+ // In that case we must build the data packet with the new values buffer
+ if valuesCap != cap(paramValues) {
+ data = append(data[:pos], paramValues...)
+ if err = mc.buf.store(data); err != nil {
+ errLog.Print(err)
+ return errBadConnNoWrite
+ }
+ }
+
+ pos += len(paramValues)
+ data = data[:pos]
+ }
+
+ return mc.writePacket(data)
+}
+
+func (mc *mysqlConn) discardResults() error {
+ for mc.status&statusMoreResultsExists != 0 {
+ resLen, err := mc.readResultSetHeaderPacket()
+ if err != nil {
+ return err
+ }
+ if resLen > 0 {
+ // columns
+ if err := mc.readUntilEOF(); err != nil {
+ return err
+ }
+ // rows
+ if err := mc.readUntilEOF(); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// http://dev.mysql.com/doc/internals/en/binary-protocol-resultset-row.html
+func (rows *binaryRows) readRow(dest []driver.Value) error {
+ data, err := rows.mc.readPacket()
+ if err != nil {
+ return err
+ }
+
+ // packet indicator [1 byte]
+ if data[0] != iOK {
+ // EOF Packet
+ if data[0] == iEOF && len(data) == 5 {
+ rows.mc.status = readStatus(data[3:])
+ rows.rs.done = true
+ if !rows.HasNextResultSet() {
+ rows.mc = nil
+ }
+ return io.EOF
+ }
+ mc := rows.mc
+ rows.mc = nil
+
+ // Error otherwise
+ return mc.handleErrorPacket(data)
+ }
+
+ // NULL-bitmap, [(column-count + 7 + 2) / 8 bytes]
+ pos := 1 + (len(dest)+7+2)>>3
+ nullMask := data[1:pos]
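+	// The binary protocol reserves the first two bits of the NULL-bitmap for
+	// status flags, which is why column i is tested at bit offset i+2 below.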
+
+ for i := range dest {
+ // Field is NULL
+ // (byte >> bit-pos) % 2 == 1
+ if ((nullMask[(i+2)>>3] >> uint((i+2)&7)) & 1) == 1 {
+ dest[i] = nil
+ continue
+ }
+
+ // Convert to byte-coded string
+ switch rows.rs.columns[i].fieldType {
+ case fieldTypeNULL:
+ dest[i] = nil
+ continue
+
+ // Numeric Types
+ case fieldTypeTiny:
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
+ dest[i] = int64(data[pos])
+ } else {
+ dest[i] = int64(int8(data[pos]))
+ }
+ pos++
+ continue
+
+ case fieldTypeShort, fieldTypeYear:
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
+ dest[i] = int64(binary.LittleEndian.Uint16(data[pos : pos+2]))
+ } else {
+ dest[i] = int64(int16(binary.LittleEndian.Uint16(data[pos : pos+2])))
+ }
+ pos += 2
+ continue
+
+ case fieldTypeInt24, fieldTypeLong:
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
+ dest[i] = int64(binary.LittleEndian.Uint32(data[pos : pos+4]))
+ } else {
+ dest[i] = int64(int32(binary.LittleEndian.Uint32(data[pos : pos+4])))
+ }
+ pos += 4
+ continue
+
+ case fieldTypeLongLong:
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
+ val := binary.LittleEndian.Uint64(data[pos : pos+8])
+ if val > math.MaxInt64 {
+ dest[i] = uint64ToString(val)
+ } else {
+ dest[i] = int64(val)
+ }
+ } else {
+ dest[i] = int64(binary.LittleEndian.Uint64(data[pos : pos+8]))
+ }
+ pos += 8
+ continue
+
+ case fieldTypeFloat:
+ dest[i] = math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4]))
+ pos += 4
+ continue
+
+ case fieldTypeDouble:
+ dest[i] = math.Float64frombits(binary.LittleEndian.Uint64(data[pos : pos+8]))
+ pos += 8
+ continue
+
+ // Length coded Binary Strings
+ case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
+ fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
+ fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
+ fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON:
+ var isNull bool
+ var n int
+ dest[i], isNull, n, err = readLengthEncodedString(data[pos:])
+ pos += n
+ if err == nil {
+ if !isNull {
+ continue
+ } else {
+ dest[i] = nil
+ continue
+ }
+ }
+ return err
+
+ case
+ fieldTypeDate, fieldTypeNewDate, // Date YYYY-MM-DD
+			fieldTypeTime,                         // Time [-][H]HH:MM:SS[.fractional]
+			fieldTypeTimestamp, fieldTypeDateTime: // Timestamp YYYY-MM-DD HH:MM:SS[.fractional]
+
+ num, isNull, n := readLengthEncodedInteger(data[pos:])
+ pos += n
+
+ switch {
+ case isNull:
+ dest[i] = nil
+ continue
+ case rows.rs.columns[i].fieldType == fieldTypeTime:
+ // database/sql does not support an equivalent to TIME, return a string
+ var dstlen uint8
+ switch decimals := rows.rs.columns[i].decimals; decimals {
+ case 0x00, 0x1f:
+ dstlen = 8
+ case 1, 2, 3, 4, 5, 6:
+ dstlen = 8 + 1 + decimals
+ default:
+ return fmt.Errorf(
+ "protocol error, illegal decimals value %d",
+ rows.rs.columns[i].decimals,
+ )
+ }
+ dest[i], err = formatBinaryTime(data[pos:pos+int(num)], dstlen)
+ case rows.mc.parseTime:
+ dest[i], err = parseBinaryDateTime(num, data[pos:], rows.mc.cfg.Loc)
+ default:
+ var dstlen uint8
+ if rows.rs.columns[i].fieldType == fieldTypeDate {
+ dstlen = 10
+ } else {
+ switch decimals := rows.rs.columns[i].decimals; decimals {
+ case 0x00, 0x1f:
+ dstlen = 19
+ case 1, 2, 3, 4, 5, 6:
+ dstlen = 19 + 1 + decimals
+ default:
+ return fmt.Errorf(
+ "protocol error, illegal decimals value %d",
+ rows.rs.columns[i].decimals,
+ )
+ }
+ }
+ dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen)
+ }
+
+ if err == nil {
+ pos += int(num)
+ continue
+ } else {
+ return err
+ }
+
+ // Please report if this happens!
+ default:
+ return fmt.Errorf("unknown field type %d", rows.rs.columns[i].fieldType)
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/result.go b/vendor/github.com/go-sql-driver/mysql/result.go
new file mode 100644
index 00000000..c6438d03
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/result.go
@@ -0,0 +1,22 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+type mysqlResult struct {
+ affectedRows int64
+ insertId int64
+}
+
+func (res *mysqlResult) LastInsertId() (int64, error) {
+ return res.insertId, nil
+}
+
+func (res *mysqlResult) RowsAffected() (int64, error) {
+ return res.affectedRows, nil
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/rows.go b/vendor/github.com/go-sql-driver/mysql/rows.go
new file mode 100644
index 00000000..888bdb5f
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/rows.go
@@ -0,0 +1,223 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "database/sql/driver"
+ "io"
+ "math"
+ "reflect"
+)
+
+type resultSet struct {
+ columns []mysqlField
+ columnNames []string
+ done bool
+}
+
+type mysqlRows struct {
+ mc *mysqlConn
+ rs resultSet
+ finish func()
+}
+
+type binaryRows struct {
+ mysqlRows
+}
+
+type textRows struct {
+ mysqlRows
+}
+
+func (rows *mysqlRows) Columns() []string {
+ if rows.rs.columnNames != nil {
+ return rows.rs.columnNames
+ }
+
+ columns := make([]string, len(rows.rs.columns))
+ if rows.mc != nil && rows.mc.cfg.ColumnsWithAlias {
+ for i := range columns {
+ if tableName := rows.rs.columns[i].tableName; len(tableName) > 0 {
+ columns[i] = tableName + "." + rows.rs.columns[i].name
+ } else {
+ columns[i] = rows.rs.columns[i].name
+ }
+ }
+ } else {
+ for i := range columns {
+ columns[i] = rows.rs.columns[i].name
+ }
+ }
+
+ rows.rs.columnNames = columns
+ return columns
+}
+
+func (rows *mysqlRows) ColumnTypeDatabaseTypeName(i int) string {
+ return rows.rs.columns[i].typeDatabaseName()
+}
+
+// func (rows *mysqlRows) ColumnTypeLength(i int) (length int64, ok bool) {
+// return int64(rows.rs.columns[i].length), true
+// }
+
+func (rows *mysqlRows) ColumnTypeNullable(i int) (nullable, ok bool) {
+ return rows.rs.columns[i].flags&flagNotNULL == 0, true
+}
+
+func (rows *mysqlRows) ColumnTypePrecisionScale(i int) (int64, int64, bool) {
+ column := rows.rs.columns[i]
+ decimals := int64(column.decimals)
+
+ switch column.fieldType {
+ case fieldTypeDecimal, fieldTypeNewDecimal:
+ if decimals > 0 {
+ return int64(column.length) - 2, decimals, true
+ }
+ return int64(column.length) - 1, decimals, true
+ case fieldTypeTimestamp, fieldTypeDateTime, fieldTypeTime:
+ return decimals, decimals, true
+ case fieldTypeFloat, fieldTypeDouble:
+ if decimals == 0x1f {
+ return math.MaxInt64, math.MaxInt64, true
+ }
+ return math.MaxInt64, decimals, true
+ }
+
+ return 0, 0, false
+}
+
+func (rows *mysqlRows) ColumnTypeScanType(i int) reflect.Type {
+ return rows.rs.columns[i].scanType()
+}
+
+func (rows *mysqlRows) Close() (err error) {
+ if f := rows.finish; f != nil {
+ f()
+ rows.finish = nil
+ }
+
+ mc := rows.mc
+ if mc == nil {
+ return nil
+ }
+ if err := mc.error(); err != nil {
+ return err
+ }
+
+ // flip the buffer for this connection if we need to drain it.
+ // note that for a successful query (i.e. one where rows.next()
+ // has been called until it returns false), `rows.mc` will be nil
+ // by the time the user calls `(*Rows).Close`, so we won't reach this
+ // see: https://github.com/golang/go/commit/651ddbdb5056ded455f47f9c494c67b389622a47
+ mc.buf.flip()
+
+ // Remove unread packets from stream
+ if !rows.rs.done {
+ err = mc.readUntilEOF()
+ }
+ if err == nil {
+ if err = mc.discardResults(); err != nil {
+ return err
+ }
+ }
+
+ rows.mc = nil
+ return err
+}
+
+func (rows *mysqlRows) HasNextResultSet() (b bool) {
+ if rows.mc == nil {
+ return false
+ }
+ return rows.mc.status&statusMoreResultsExists != 0
+}
+
+func (rows *mysqlRows) nextResultSet() (int, error) {
+ if rows.mc == nil {
+ return 0, io.EOF
+ }
+ if err := rows.mc.error(); err != nil {
+ return 0, err
+ }
+
+ // Remove unread packets from stream
+ if !rows.rs.done {
+ if err := rows.mc.readUntilEOF(); err != nil {
+ return 0, err
+ }
+ rows.rs.done = true
+ }
+
+ if !rows.HasNextResultSet() {
+ rows.mc = nil
+ return 0, io.EOF
+ }
+ rows.rs = resultSet{}
+ return rows.mc.readResultSetHeaderPacket()
+}
+
+func (rows *mysqlRows) nextNotEmptyResultSet() (int, error) {
+ for {
+ resLen, err := rows.nextResultSet()
+ if err != nil {
+ return 0, err
+ }
+
+ if resLen > 0 {
+ return resLen, nil
+ }
+
+ rows.rs.done = true
+ }
+}
+
+func (rows *binaryRows) NextResultSet() error {
+ resLen, err := rows.nextNotEmptyResultSet()
+ if err != nil {
+ return err
+ }
+
+ rows.rs.columns, err = rows.mc.readColumns(resLen)
+ return err
+}
+
+func (rows *binaryRows) Next(dest []driver.Value) error {
+ if mc := rows.mc; mc != nil {
+ if err := mc.error(); err != nil {
+ return err
+ }
+
+ // Fetch next row from stream
+ return rows.readRow(dest)
+ }
+ return io.EOF
+}
+
+func (rows *textRows) NextResultSet() (err error) {
+ resLen, err := rows.nextNotEmptyResultSet()
+ if err != nil {
+ return err
+ }
+
+ rows.rs.columns, err = rows.mc.readColumns(resLen)
+ return err
+}
+
+func (rows *textRows) Next(dest []driver.Value) error {
+ if mc := rows.mc; mc != nil {
+ if err := mc.error(); err != nil {
+ return err
+ }
+
+ // Fetch next row from stream
+ return rows.readRow(dest)
+ }
+ return io.EOF
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/statement.go b/vendor/github.com/go-sql-driver/mysql/statement.go
new file mode 100644
index 00000000..18a3ae49
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/statement.go
@@ -0,0 +1,220 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "database/sql/driver"
+ "encoding/json"
+ "fmt"
+ "io"
+ "reflect"
+)
+
+type mysqlStmt struct {
+ mc *mysqlConn
+ id uint32
+ paramCount int
+}
+
+func (stmt *mysqlStmt) Close() error {
+ if stmt.mc == nil || stmt.mc.closed.IsSet() {
+ // driver.Stmt.Close can be called more than once, thus this function
+ // has to be idempotent.
+ // See also Issue #450 and golang/go#16019.
+ //errLog.Print(ErrInvalidConn)
+ return driver.ErrBadConn
+ }
+
+ err := stmt.mc.writeCommandPacketUint32(comStmtClose, stmt.id)
+ stmt.mc = nil
+ return err
+}
+
+func (stmt *mysqlStmt) NumInput() int {
+ return stmt.paramCount
+}
+
+func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter {
+ return converter{}
+}
+
+func (stmt *mysqlStmt) CheckNamedValue(nv *driver.NamedValue) (err error) {
+ nv.Value, err = converter{}.ConvertValue(nv.Value)
+ return
+}
+
+func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
+ if stmt.mc.closed.IsSet() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ // Send command
+ err := stmt.writeExecutePacket(args)
+ if err != nil {
+ return nil, stmt.mc.markBadConn(err)
+ }
+
+ mc := stmt.mc
+
+ mc.affectedRows = 0
+ mc.insertId = 0
+
+ // Read Result
+ resLen, err := mc.readResultSetHeaderPacket()
+ if err != nil {
+ return nil, err
+ }
+
+ if resLen > 0 {
+ // Columns
+ if err = mc.readUntilEOF(); err != nil {
+ return nil, err
+ }
+
+ // Rows
+ if err := mc.readUntilEOF(); err != nil {
+ return nil, err
+ }
+ }
+
+ if err := mc.discardResults(); err != nil {
+ return nil, err
+ }
+
+ return &mysqlResult{
+ affectedRows: int64(mc.affectedRows),
+ insertId: int64(mc.insertId),
+ }, nil
+}
+
+func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
+ return stmt.query(args)
+}
+
+func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) {
+ if stmt.mc.closed.IsSet() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ // Send command
+ err := stmt.writeExecutePacket(args)
+ if err != nil {
+ return nil, stmt.mc.markBadConn(err)
+ }
+
+ mc := stmt.mc
+
+ // Read Result
+ resLen, err := mc.readResultSetHeaderPacket()
+ if err != nil {
+ return nil, err
+ }
+
+ rows := new(binaryRows)
+
+ if resLen > 0 {
+ rows.mc = mc
+ rows.rs.columns, err = mc.readColumns(resLen)
+ } else {
+ rows.rs.done = true
+
+ switch err := rows.NextResultSet(); err {
+ case nil, io.EOF:
+ return rows, nil
+ default:
+ return nil, err
+ }
+ }
+
+ return rows, err
+}
+
+var jsonType = reflect.TypeOf(json.RawMessage{})
+
+type converter struct{}
+
+// ConvertValue mirrors the reference/default converter in database/sql/driver
+// with one exception: we support uint64 values with their high bit set, which
+// the default implementation does not. This function should be kept in sync with
+// database/sql/driver defaultConverter.ConvertValue() except for that
+// deliberate difference.
+func (c converter) ConvertValue(v interface{}) (driver.Value, error) {
+ if driver.IsValue(v) {
+ return v, nil
+ }
+
+ if vr, ok := v.(driver.Valuer); ok {
+ sv, err := callValuerValue(vr)
+ if err != nil {
+ return nil, err
+ }
+ if driver.IsValue(sv) {
+ return sv, nil
+ }
+		// A value returned from the Valuer interface can be "a type handled by
+ // a database driver's NamedValueChecker interface" so we should accept
+ // uint64 here as well.
+ if u, ok := sv.(uint64); ok {
+ return u, nil
+ }
+ return nil, fmt.Errorf("non-Value type %T returned from Value", sv)
+ }
+ rv := reflect.ValueOf(v)
+ switch rv.Kind() {
+ case reflect.Ptr:
+ // indirect pointers
+ if rv.IsNil() {
+ return nil, nil
+ } else {
+ return c.ConvertValue(rv.Elem().Interface())
+ }
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return rv.Int(), nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return rv.Uint(), nil
+ case reflect.Float32, reflect.Float64:
+ return rv.Float(), nil
+ case reflect.Bool:
+ return rv.Bool(), nil
+ case reflect.Slice:
+ switch t := rv.Type(); {
+ case t == jsonType:
+ return v, nil
+ case t.Elem().Kind() == reflect.Uint8:
+ return rv.Bytes(), nil
+ default:
+ return nil, fmt.Errorf("unsupported type %T, a slice of %s", v, t.Elem().Kind())
+ }
+ case reflect.String:
+ return rv.String(), nil
+ }
+ return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind())
+}
+
+var valuerReflectType = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
+
+// callValuerValue returns vr.Value(), with one exception:
+// If vr.Value is an auto-generated method on a pointer type and the
+// pointer is nil, it would panic at runtime in the panicwrap
+// method. Treat it like nil instead.
+//
+// This is so people can implement driver.Value on value types and
+// still use nil pointers to those types to mean nil/NULL, just like
+// string/*string.
+//
+// This is an exact copy of the same-named unexported function from the
+// database/sql package.
+func callValuerValue(vr driver.Valuer) (v driver.Value, err error) {
+ if rv := reflect.ValueOf(vr); rv.Kind() == reflect.Ptr &&
+ rv.IsNil() &&
+ rv.Type().Elem().Implements(valuerReflectType) {
+ return nil, nil
+ }
+ return vr.Value()
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/transaction.go b/vendor/github.com/go-sql-driver/mysql/transaction.go
new file mode 100644
index 00000000..417d7279
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/transaction.go
@@ -0,0 +1,31 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+type mysqlTx struct {
+ mc *mysqlConn
+}
+
+func (tx *mysqlTx) Commit() (err error) {
+ if tx.mc == nil || tx.mc.closed.IsSet() {
+ return ErrInvalidConn
+ }
+ err = tx.mc.exec("COMMIT")
+ tx.mc = nil
+ return
+}
+
+func (tx *mysqlTx) Rollback() (err error) {
+ if tx.mc == nil || tx.mc.closed.IsSet() {
+ return ErrInvalidConn
+ }
+ err = tx.mc.exec("ROLLBACK")
+ tx.mc = nil
+ return
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/utils.go b/vendor/github.com/go-sql-driver/mysql/utils.go
new file mode 100644
index 00000000..d6545f5b
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/utils.go
@@ -0,0 +1,868 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "crypto/tls"
+ "database/sql"
+ "database/sql/driver"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// Registry for custom tls.Configs
+var (
+ tlsConfigLock sync.RWMutex
+ tlsConfigRegistry map[string]*tls.Config
+)
+
+// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open.
+// Use the key as a value in the DSN where tls=value.
+//
+// Note: The provided tls.Config is exclusively owned by the driver after
+// registering it.
+//
+// rootCertPool := x509.NewCertPool()
+// pem, err := ioutil.ReadFile("/path/ca-cert.pem")
+// if err != nil {
+// log.Fatal(err)
+// }
+// if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
+// log.Fatal("Failed to append PEM.")
+// }
+// clientCert := make([]tls.Certificate, 0, 1)
+// certs, err := tls.LoadX509KeyPair("/path/client-cert.pem", "/path/client-key.pem")
+// if err != nil {
+// log.Fatal(err)
+// }
+// clientCert = append(clientCert, certs)
+// mysql.RegisterTLSConfig("custom", &tls.Config{
+// RootCAs: rootCertPool,
+// Certificates: clientCert,
+// })
+// db, err := sql.Open("mysql", "user@tcp(localhost:3306)/test?tls=custom")
+//
+func RegisterTLSConfig(key string, config *tls.Config) error {
+ if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" || strings.ToLower(key) == "preferred" {
+ return fmt.Errorf("key '%s' is reserved", key)
+ }
+
+ tlsConfigLock.Lock()
+ if tlsConfigRegistry == nil {
+ tlsConfigRegistry = make(map[string]*tls.Config)
+ }
+
+ tlsConfigRegistry[key] = config
+ tlsConfigLock.Unlock()
+ return nil
+}
+
+// DeregisterTLSConfig removes the tls.Config associated with key.
+func DeregisterTLSConfig(key string) {
+ tlsConfigLock.Lock()
+ if tlsConfigRegistry != nil {
+ delete(tlsConfigRegistry, key)
+ }
+ tlsConfigLock.Unlock()
+}
+
+func getTLSConfigClone(key string) (config *tls.Config) {
+ tlsConfigLock.RLock()
+ if v, ok := tlsConfigRegistry[key]; ok {
+ config = v.Clone()
+ }
+ tlsConfigLock.RUnlock()
+ return
+}
+
+// Returns the bool value of the input.
+// The 2nd return value indicates if the input was a valid bool value
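+// For example (illustrative): readBool("true") returns (true, true) and
+// readBool("maybe") returns (false, false).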
+func readBool(input string) (value bool, valid bool) {
+ switch input {
+ case "1", "true", "TRUE", "True":
+ return true, true
+ case "0", "false", "FALSE", "False":
+ return false, true
+ }
+
+ // Not a valid bool value
+ return
+}
+
+/******************************************************************************
+* Time related utils *
+******************************************************************************/
+
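+// parseDateTime converts a textual DATE/DATETIME value into a time.Time in
+// the given location. Illustrative accepted forms (not an exhaustive list):
+//
+//	"2006-01-02"                 date only
+//	"2006-01-02 15:04:05"        second precision
+//	"2006-01-02 15:04:05.999999" up to microsecond precision
+//
+// The zero value "0000-00-00 00:00:00" is returned as time.Time{}.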
+func parseDateTime(b []byte, loc *time.Location) (time.Time, error) {
+ const base = "0000-00-00 00:00:00.000000"
+ switch len(b) {
+ case 10, 19, 21, 22, 23, 24, 25, 26: // up to "YYYY-MM-DD HH:MM:SS.MMMMMM"
+ if string(b) == base[:len(b)] {
+ return time.Time{}, nil
+ }
+
+ year, err := parseByteYear(b)
+ if err != nil {
+ return time.Time{}, err
+ }
+ if year <= 0 {
+ year = 1
+ }
+
+ if b[4] != '-' {
+ return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[4])
+ }
+
+ m, err := parseByte2Digits(b[5], b[6])
+ if err != nil {
+ return time.Time{}, err
+ }
+ if m <= 0 {
+ m = 1
+ }
+ month := time.Month(m)
+
+ if b[7] != '-' {
+ return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[7])
+ }
+
+ day, err := parseByte2Digits(b[8], b[9])
+ if err != nil {
+ return time.Time{}, err
+ }
+ if day <= 0 {
+ day = 1
+ }
+ if len(b) == 10 {
+ return time.Date(year, month, day, 0, 0, 0, 0, loc), nil
+ }
+
+ if b[10] != ' ' {
+ return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[10])
+ }
+
+ hour, err := parseByte2Digits(b[11], b[12])
+ if err != nil {
+ return time.Time{}, err
+ }
+ if b[13] != ':' {
+ return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[13])
+ }
+
+ min, err := parseByte2Digits(b[14], b[15])
+ if err != nil {
+ return time.Time{}, err
+ }
+ if b[16] != ':' {
+ return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[16])
+ }
+
+ sec, err := parseByte2Digits(b[17], b[18])
+ if err != nil {
+ return time.Time{}, err
+ }
+ if len(b) == 19 {
+ return time.Date(year, month, day, hour, min, sec, 0, loc), nil
+ }
+
+ if b[19] != '.' {
+ return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[19])
+ }
+ nsec, err := parseByteNanoSec(b[20:])
+ if err != nil {
+ return time.Time{}, err
+ }
+ return time.Date(year, month, day, hour, min, sec, nsec, loc), nil
+ default:
+ return time.Time{}, fmt.Errorf("invalid time bytes: %s", b)
+ }
+}
+
+func parseByteYear(b []byte) (int, error) {
+ year, n := 0, 1000
+ for i := 0; i < 4; i++ {
+ v, err := bToi(b[i])
+ if err != nil {
+ return 0, err
+ }
+ year += v * n
+ n = n / 10
+ }
+ return year, nil
+}
+
+func parseByte2Digits(b1, b2 byte) (int, error) {
+ d1, err := bToi(b1)
+ if err != nil {
+ return 0, err
+ }
+ d2, err := bToi(b2)
+ if err != nil {
+ return 0, err
+ }
+ return d1*10 + d2, nil
+}
+
+func parseByteNanoSec(b []byte) (int, error) {
+ ns, digit := 0, 100000 // max is 6-digits
+ for i := 0; i < len(b); i++ {
+ v, err := bToi(b[i])
+ if err != nil {
+ return 0, err
+ }
+ ns += v * digit
+ digit /= 10
+ }
+	// The fractional part has at most 6 digits (microseconds), while
+	// nanoseconds need 9, so scale the parsed value by 10^(9-6) = 1000.
+ return ns * 1000, nil
+}
+
+func bToi(b byte) (int, error) {
+ if b < '0' || b > '9' {
+ return 0, errors.New("not [0-9]")
+ }
+ return int(b - '0'), nil
+}
+
+func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Value, error) {
+ switch num {
+ case 0:
+ return time.Time{}, nil
+ case 4:
+ return time.Date(
+ int(binary.LittleEndian.Uint16(data[:2])), // year
+ time.Month(data[2]), // month
+ int(data[3]), // day
+ 0, 0, 0, 0,
+ loc,
+ ), nil
+ case 7:
+ return time.Date(
+ int(binary.LittleEndian.Uint16(data[:2])), // year
+ time.Month(data[2]), // month
+ int(data[3]), // day
+ int(data[4]), // hour
+ int(data[5]), // minutes
+ int(data[6]), // seconds
+ 0,
+ loc,
+ ), nil
+ case 11:
+ return time.Date(
+ int(binary.LittleEndian.Uint16(data[:2])), // year
+ time.Month(data[2]), // month
+ int(data[3]), // day
+ int(data[4]), // hour
+ int(data[5]), // minutes
+ int(data[6]), // seconds
+ int(binary.LittleEndian.Uint32(data[7:11]))*1000, // nanoseconds
+ loc,
+ ), nil
+ }
+ return nil, fmt.Errorf("invalid DATETIME packet length %d", num)
+}
+
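+// appendDateTime appends t to buf in MySQL DATETIME literal form. As an
+// illustrative example, 2006-01-02 15:04:05.120 is appended as
+// "2006-01-02 15:04:05.12": the clock part is omitted entirely when it is
+// exactly midnight with no fraction, and trailing zeros of the fractional
+// part are trimmed.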
+func appendDateTime(buf []byte, t time.Time) ([]byte, error) {
+ year, month, day := t.Date()
+ hour, min, sec := t.Clock()
+ nsec := t.Nanosecond()
+
+ if year < 1 || year > 9999 {
+ return buf, errors.New("year is not in the range [1, 9999]: " + strconv.Itoa(year)) // use errors.New instead of fmt.Errorf to avoid year escape to heap
+ }
+ year100 := year / 100
+ year1 := year % 100
+
+ var localBuf [len("2006-01-02T15:04:05.999999999")]byte // does not escape
+ localBuf[0], localBuf[1], localBuf[2], localBuf[3] = digits10[year100], digits01[year100], digits10[year1], digits01[year1]
+ localBuf[4] = '-'
+ localBuf[5], localBuf[6] = digits10[month], digits01[month]
+ localBuf[7] = '-'
+ localBuf[8], localBuf[9] = digits10[day], digits01[day]
+
+ if hour == 0 && min == 0 && sec == 0 && nsec == 0 {
+ return append(buf, localBuf[:10]...), nil
+ }
+
+ localBuf[10] = ' '
+ localBuf[11], localBuf[12] = digits10[hour], digits01[hour]
+ localBuf[13] = ':'
+ localBuf[14], localBuf[15] = digits10[min], digits01[min]
+ localBuf[16] = ':'
+ localBuf[17], localBuf[18] = digits10[sec], digits01[sec]
+
+ if nsec == 0 {
+ return append(buf, localBuf[:19]...), nil
+ }
+ nsec100000000 := nsec / 100000000
+ nsec1000000 := (nsec / 1000000) % 100
+ nsec10000 := (nsec / 10000) % 100
+ nsec100 := (nsec / 100) % 100
+ nsec1 := nsec % 100
+ localBuf[19] = '.'
+
+ // milli second
+ localBuf[20], localBuf[21], localBuf[22] =
+ digits01[nsec100000000], digits10[nsec1000000], digits01[nsec1000000]
+ // micro second
+ localBuf[23], localBuf[24], localBuf[25] =
+ digits10[nsec10000], digits01[nsec10000], digits10[nsec100]
+ // nano second
+ localBuf[26], localBuf[27], localBuf[28] =
+ digits01[nsec100], digits10[nsec1], digits01[nsec1]
+
+ // trim trailing zeros
+ n := len(localBuf)
+ for n > 0 && localBuf[n-1] == '0' {
+ n--
+ }
+
+ return append(buf, localBuf[:n]...), nil
+}
+
+// zeroDateTime is used in formatBinaryDateTime to avoid an allocation
+// if the DATE or DATETIME has the zero value.
+// It must never be changed.
+// The current behavior depends on database/sql copying the result.
+var zeroDateTime = []byte("0000-00-00 00:00:00.000000")
+
+const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"
+const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999"
+
+func appendMicrosecs(dst, src []byte, decimals int) []byte {
+ if decimals <= 0 {
+ return dst
+ }
+ if len(src) == 0 {
+ return append(dst, ".000000"[:decimals+1]...)
+ }
+
+ microsecs := binary.LittleEndian.Uint32(src[:4])
+ p1 := byte(microsecs / 10000)
+ microsecs -= 10000 * uint32(p1)
+ p2 := byte(microsecs / 100)
+ microsecs -= 100 * uint32(p2)
+ p3 := byte(microsecs)
+
+ switch decimals {
+ default:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2], digits01[p2],
+ digits10[p3], digits01[p3],
+ )
+ case 1:
+ return append(dst, '.',
+ digits10[p1],
+ )
+ case 2:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ )
+ case 3:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2],
+ )
+ case 4:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2], digits01[p2],
+ )
+ case 5:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2], digits01[p2],
+ digits10[p3],
+ )
+ }
+}
+
+func formatBinaryDateTime(src []byte, length uint8) (driver.Value, error) {
+ // length expects the deterministic length of the zero value,
+ // negative time and 100+ hours are automatically added if needed
+ if len(src) == 0 {
+ return zeroDateTime[:length], nil
+ }
+ var dst []byte // return value
+ var p1, p2, p3 byte // current digit pair
+
+ switch length {
+ case 10, 19, 21, 22, 23, 24, 25, 26:
+ default:
+ t := "DATE"
+ if length > 10 {
+ t += "TIME"
+ }
+ return nil, fmt.Errorf("illegal %s length %d", t, length)
+ }
+ switch len(src) {
+ case 4, 7, 11:
+ default:
+ t := "DATE"
+ if length > 10 {
+ t += "TIME"
+ }
+ return nil, fmt.Errorf("illegal %s packet length %d", t, len(src))
+ }
+ dst = make([]byte, 0, length)
+ // start with the date
+ year := binary.LittleEndian.Uint16(src[:2])
+ pt := year / 100
+ p1 = byte(year - 100*uint16(pt))
+ p2, p3 = src[2], src[3]
+ dst = append(dst,
+ digits10[pt], digits01[pt],
+ digits10[p1], digits01[p1], '-',
+ digits10[p2], digits01[p2], '-',
+ digits10[p3], digits01[p3],
+ )
+ if length == 10 {
+ return dst, nil
+ }
+ if len(src) == 4 {
+ return append(dst, zeroDateTime[10:length]...), nil
+ }
+ dst = append(dst, ' ')
+ p1 = src[4] // hour
+ src = src[5:]
+
+ // p1 is 2-digit hour, src is after hour
+ p2, p3 = src[0], src[1]
+ dst = append(dst,
+ digits10[p1], digits01[p1], ':',
+ digits10[p2], digits01[p2], ':',
+ digits10[p3], digits01[p3],
+ )
+ return appendMicrosecs(dst, src[2:], int(length)-20), nil
+}
+
+func formatBinaryTime(src []byte, length uint8) (driver.Value, error) {
+ // length expects the deterministic length of the zero value,
+ // negative time and 100+ hours are automatically added if needed
+ if len(src) == 0 {
+ return zeroDateTime[11 : 11+length], nil
+ }
+ var dst []byte // return value
+
+ switch length {
+ case
+ 8, // time (can be up to 10 when negative and 100+ hours)
+ 10, 11, 12, 13, 14, 15: // time with fractional seconds
+ default:
+ return nil, fmt.Errorf("illegal TIME length %d", length)
+ }
+ switch len(src) {
+ case 8, 12:
+ default:
+ return nil, fmt.Errorf("invalid TIME packet length %d", len(src))
+ }
+ // +2 to enable negative time and 100+ hours
+ dst = make([]byte, 0, length+2)
+ if src[0] == 1 {
+ dst = append(dst, '-')
+ }
+ days := binary.LittleEndian.Uint32(src[1:5])
+ hours := int64(days)*24 + int64(src[5])
+
+ if hours >= 100 {
+ dst = strconv.AppendInt(dst, hours, 10)
+ } else {
+ dst = append(dst, digits10[hours], digits01[hours])
+ }
+
+ min, sec := src[6], src[7]
+ dst = append(dst, ':',
+ digits10[min], digits01[min], ':',
+ digits10[sec], digits01[sec],
+ )
+ return appendMicrosecs(dst, src[8:], int(length)-9), nil
+}
+
+/******************************************************************************
+* Convert from and to bytes *
+******************************************************************************/
+
+func uint64ToBytes(n uint64) []byte {
+ return []byte{
+ byte(n),
+ byte(n >> 8),
+ byte(n >> 16),
+ byte(n >> 24),
+ byte(n >> 32),
+ byte(n >> 40),
+ byte(n >> 48),
+ byte(n >> 56),
+ }
+}
+
+func uint64ToString(n uint64) []byte {
+ var a [20]byte
+ i := 20
+
+ // U+0030 = 0
+ // ...
+ // U+0039 = 9
+
+ var q uint64
+ for n >= 10 {
+ i--
+ q = n / 10
+ a[i] = uint8(n-q*10) + 0x30
+ n = q
+ }
+
+ i--
+ a[i] = uint8(n) + 0x30
+
+ return a[i:]
+}
+
+// treats string value as unsigned integer representation
+func stringToInt(b []byte) int {
+ val := 0
+ for i := range b {
+ val *= 10
+ val += int(b[i] - 0x30)
+ }
+ return val
+}
+
+// returns the string read as a bytes slice, whether the value is NULL,
+// the number of bytes read and an error, in case the string is longer than
+// the input slice
+func readLengthEncodedString(b []byte) ([]byte, bool, int, error) {
+ // Get length
+ num, isNull, n := readLengthEncodedInteger(b)
+ if num < 1 {
+ return b[n:n], isNull, n, nil
+ }
+
+ n += int(num)
+
+ // Check data length
+ if len(b) >= n {
+ return b[n-int(num) : n : n], false, n, nil
+ }
+ return nil, false, n, io.EOF
+}
+
+// returns the number of bytes skipped and an error, in case the string is
+// longer than the input slice
+func skipLengthEncodedString(b []byte) (int, error) {
+ // Get length
+ num, _, n := readLengthEncodedInteger(b)
+ if num < 1 {
+ return n, nil
+ }
+
+ n += int(num)
+
+ // Check data length
+ if len(b) >= n {
+ return n, nil
+ }
+ return n, io.EOF
+}
+
+// returns the number read, whether the value is NULL and the number of bytes read
+func readLengthEncodedInteger(b []byte) (uint64, bool, int) {
+ // See issue #349
+ if len(b) == 0 {
+ return 0, true, 1
+ }
+
+ switch b[0] {
+ // 251: NULL
+ case 0xfb:
+ return 0, true, 1
+
+ // 252: value of following 2
+ case 0xfc:
+ return uint64(b[1]) | uint64(b[2])<<8, false, 3
+
+ // 253: value of following 3
+ case 0xfd:
+ return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4
+
+ // 254: value of following 8
+ case 0xfe:
+ return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 |
+ uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 |
+ uint64(b[7])<<48 | uint64(b[8])<<56,
+ false, 9
+ }
+
+ // 0-250: value of first byte
+ return uint64(b[0]), false, 1
+}
+
+// encodes a uint64 value and appends it to the given bytes slice
+func appendLengthEncodedInteger(b []byte, n uint64) []byte {
+ switch {
+ case n <= 250:
+ return append(b, byte(n))
+
+ case n <= 0xffff:
+ return append(b, 0xfc, byte(n), byte(n>>8))
+
+ case n <= 0xffffff:
+ return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16))
+ }
+ return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24),
+ byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56))
+}
+
+// reserveBuffer checks cap(buf) and expands the buffer to len(buf) + appendSize.
+// If cap(buf) is not enough, it reallocates a new buffer.
+func reserveBuffer(buf []byte, appendSize int) []byte {
+ newSize := len(buf) + appendSize
+ if cap(buf) < newSize {
+ // Grow buffer exponentially
+ newBuf := make([]byte, len(buf)*2+appendSize)
+ copy(newBuf, buf)
+ buf = newBuf
+ }
+ return buf[:newSize]
+}
+
+// escapeBytesBackslash escapes []byte with backslashes (\)
+// This escapes the contents of a string (provided as []byte) by adding backslashes before special
+// characters, and turning others into specific escape sequences, such as
+// turning newlines into \n and null bytes into \0.
+// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L823-L932
+func escapeBytesBackslash(buf, v []byte) []byte {
+ pos := len(buf)
+ buf = reserveBuffer(buf, len(v)*2)
+
+ for _, c := range v {
+ switch c {
+ case '\x00':
+ buf[pos] = '\\'
+ buf[pos+1] = '0'
+ pos += 2
+ case '\n':
+ buf[pos] = '\\'
+ buf[pos+1] = 'n'
+ pos += 2
+ case '\r':
+ buf[pos] = '\\'
+ buf[pos+1] = 'r'
+ pos += 2
+ case '\x1a':
+ buf[pos] = '\\'
+ buf[pos+1] = 'Z'
+ pos += 2
+ case '\'':
+ buf[pos] = '\\'
+ buf[pos+1] = '\''
+ pos += 2
+ case '"':
+ buf[pos] = '\\'
+ buf[pos+1] = '"'
+ pos += 2
+ case '\\':
+ buf[pos] = '\\'
+ buf[pos+1] = '\\'
+ pos += 2
+ default:
+ buf[pos] = c
+ pos++
+ }
+ }
+
+ return buf[:pos]
+}
+
+// escapeStringBackslash is similar to escapeBytesBackslash but for string.
+func escapeStringBackslash(buf []byte, v string) []byte {
+ pos := len(buf)
+ buf = reserveBuffer(buf, len(v)*2)
+
+ for i := 0; i < len(v); i++ {
+ c := v[i]
+ switch c {
+ case '\x00':
+ buf[pos] = '\\'
+ buf[pos+1] = '0'
+ pos += 2
+ case '\n':
+ buf[pos] = '\\'
+ buf[pos+1] = 'n'
+ pos += 2
+ case '\r':
+ buf[pos] = '\\'
+ buf[pos+1] = 'r'
+ pos += 2
+ case '\x1a':
+ buf[pos] = '\\'
+ buf[pos+1] = 'Z'
+ pos += 2
+ case '\'':
+ buf[pos] = '\\'
+ buf[pos+1] = '\''
+ pos += 2
+ case '"':
+ buf[pos] = '\\'
+ buf[pos+1] = '"'
+ pos += 2
+ case '\\':
+ buf[pos] = '\\'
+ buf[pos+1] = '\\'
+ pos += 2
+ default:
+ buf[pos] = c
+ pos++
+ }
+ }
+
+ return buf[:pos]
+}
+
+// escapeBytesQuotes escapes apostrophes in []byte by doubling them up.
+// This escapes the contents of a string by doubling up any apostrophes that
+// it contains. This is used when the NO_BACKSLASH_ESCAPES SQL_MODE is in
+// effect on the server.
+// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L963-L1038
+func escapeBytesQuotes(buf, v []byte) []byte {
+ pos := len(buf)
+ buf = reserveBuffer(buf, len(v)*2)
+
+ for _, c := range v {
+ if c == '\'' {
+ buf[pos] = '\''
+ buf[pos+1] = '\''
+ pos += 2
+ } else {
+ buf[pos] = c
+ pos++
+ }
+ }
+
+ return buf[:pos]
+}
+
+// escapeStringQuotes is similar to escapeBytesQuotes but for string.
+func escapeStringQuotes(buf []byte, v string) []byte {
+ pos := len(buf)
+ buf = reserveBuffer(buf, len(v)*2)
+
+ for i := 0; i < len(v); i++ {
+ c := v[i]
+ if c == '\'' {
+ buf[pos] = '\''
+ buf[pos+1] = '\''
+ pos += 2
+ } else {
+ buf[pos] = c
+ pos++
+ }
+ }
+
+ return buf[:pos]
+}
+
+/******************************************************************************
+* Sync utils *
+******************************************************************************/
+
+// noCopy may be embedded into structs which must not be copied
+// after the first use.
+//
+// See https://github.com/golang/go/issues/8005#issuecomment-190753527
+// for details.
+type noCopy struct{}
+
+// Lock is a no-op used by -copylocks checker from `go vet`.
+func (*noCopy) Lock() {}
+
+// atomicBool is a wrapper around uint32 for usage as a boolean value with
+// atomic access.
+type atomicBool struct {
+ _noCopy noCopy
+ value uint32
+}
+
+// IsSet returns whether the current boolean value is true
+func (ab *atomicBool) IsSet() bool {
+ return atomic.LoadUint32(&ab.value) > 0
+}
+
+// Set sets the value of the bool regardless of the previous value
+func (ab *atomicBool) Set(value bool) {
+ if value {
+ atomic.StoreUint32(&ab.value, 1)
+ } else {
+ atomic.StoreUint32(&ab.value, 0)
+ }
+}
+
+// TrySet sets the value of the bool and returns whether the value changed
+func (ab *atomicBool) TrySet(value bool) bool {
+ if value {
+ return atomic.SwapUint32(&ab.value, 1) == 0
+ }
+ return atomic.SwapUint32(&ab.value, 0) > 0
+}
+
+// atomicError is a wrapper for atomically accessed error values
+type atomicError struct {
+ _noCopy noCopy
+ value atomic.Value
+}
+
+// Set sets the error value regardless of the previous value.
+// The value must not be nil
+func (ae *atomicError) Set(value error) {
+ ae.value.Store(value)
+}
+
+// Value returns the current error value
+func (ae *atomicError) Value() error {
+ if v := ae.value.Load(); v != nil {
+ // this will panic if the value doesn't implement the error interface
+ return v.(error)
+ }
+ return nil
+}
+
+func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) {
+ dargs := make([]driver.Value, len(named))
+ for n, param := range named {
+ if len(param.Name) > 0 {
+ // TODO: support the use of Named Parameters #561
+ return nil, errors.New("mysql: driver does not support the use of Named Parameters")
+ }
+ dargs[n] = param.Value
+ }
+ return dargs, nil
+}
+
+func mapIsolationLevel(level driver.IsolationLevel) (string, error) {
+ switch sql.IsolationLevel(level) {
+ case sql.LevelRepeatableRead:
+ return "REPEATABLE READ", nil
+ case sql.LevelReadCommitted:
+ return "READ COMMITTED", nil
+ case sql.LevelReadUncommitted:
+ return "READ UNCOMMITTED", nil
+ case sql.LevelSerializable:
+ return "SERIALIZABLE", nil
+ default:
+ return "", fmt.Errorf("mysql: unsupported isolation level: %v", level)
+ }
+}
diff --git a/vendor/github.com/golang-jwt/jwt/.gitignore b/vendor/github.com/golang-jwt/jwt/.gitignore
new file mode 100644
index 00000000..09573e01
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/.gitignore
@@ -0,0 +1,4 @@
+.DS_Store
+bin
+.idea/
+
diff --git a/vendor/github.com/golang-jwt/jwt/LICENSE b/vendor/github.com/golang-jwt/jwt/LICENSE
new file mode 100644
index 00000000..35dbc252
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/LICENSE
@@ -0,0 +1,9 @@
+Copyright (c) 2012 Dave Grijalva
+Copyright (c) 2021 golang-jwt maintainers
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
diff --git a/vendor/github.com/golang-jwt/jwt/MIGRATION_GUIDE.md b/vendor/github.com/golang-jwt/jwt/MIGRATION_GUIDE.md
new file mode 100644
index 00000000..c4efbd2a
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/MIGRATION_GUIDE.md
@@ -0,0 +1,22 @@
+## Migration Guide (v3.2.1)
+
+Starting from [v3.2.1](https://github.com/golang-jwt/jwt/releases/tag/v3.2.1), the import path has changed from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt`. Future releases will use the `github.com/golang-jwt/jwt` import path and continue the existing versioning scheme of `v3.x.x+incompatible`. Backwards-compatible patches and fixes will be done on the `v3` release branch, whereas new build-breaking features will be developed in a `v4` release, possibly including a SIV-style import path.
+
+### go.mod replacement
+
+As a first step, the easiest way is to use `go mod edit` to issue a replacement.
+
+```
+go mod edit -replace github.com/dgrijalva/jwt-go=github.com/golang-jwt/jwt@v3.2.1+incompatible
+go mod tidy
+```
+
+This will still keep the old import path in your code but replace it with the new package and also introduce a new indirect dependency on `github.com/golang-jwt/jwt`. Try to compile your project; it should still work.
+
+### Cleanup
+
+If your code still builds consistently, you can replace all occurrences of `github.com/dgrijalva/jwt-go` with `github.com/golang-jwt/jwt`, either manually or by using tools such as `sed`. Finally, the `replace` directive in the `go.mod` file can be removed.
+
+## Older releases (before v3.2.0)
+
+The original migration guide for older releases can be found at https://github.com/dgrijalva/jwt-go/blob/master/MIGRATION_GUIDE.md.
\ No newline at end of file
diff --git a/vendor/github.com/golang-jwt/jwt/README.md b/vendor/github.com/golang-jwt/jwt/README.md
new file mode 100644
index 00000000..9b653e46
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/README.md
@@ -0,0 +1,113 @@
+# jwt-go
+
+[Build status](https://github.com/golang-jwt/jwt/actions/workflows/build.yml)
+[Go reference](https://pkg.go.dev/github.com/golang-jwt/jwt)
+
+A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](https://datatracker.ietf.org/doc/html/rfc7519).
+
+**IMPORT PATH CHANGE:** Starting from [v3.2.1](https://github.com/golang-jwt/jwt/releases/tag/v3.2.1), the import path has changed from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt`. After the original author of the library suggested migrating the maintenance of `jwt-go`, a dedicated team of open source maintainers decided to clone the existing library into this repository. See [dgrijalva/jwt-go#462](https://github.com/dgrijalva/jwt-go/issues/462) for a detailed discussion on this topic.
+
+Future releases will use the `github.com/golang-jwt/jwt` import path and continue the existing versioning scheme of `v3.x.x+incompatible`. Backwards-compatible patches and fixes will be done on the `v3` release branch, whereas new build-breaking features will be developed in a `v4` release, possibly including a SIV-style import path.
+
+**SECURITY NOTICE:** Some older versions of Go have a security issue in the crypto/elliptic package. The recommendation is to upgrade to at least Go 1.15. See issue [dgrijalva/jwt-go#216](https://github.com/dgrijalva/jwt-go/issues/216) for more detail.
+
+**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided.
+
+### Supported Go versions
+
+Our support of Go versions is aligned with Go's [version release policy](https://golang.org/doc/devel/release#policy).
+So we will support a major version of Go until there are two newer major releases.
+We no longer support building jwt-go with unsupported Go versions, as these contain security vulnerabilities
+which will not be fixed.
+
+## What the heck is a JWT?
+
+JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens.
+
+In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects that have been [base64url](https://datatracker.ietf.org/doc/html/rfc4648) encoded. The last part is the signature, encoded the same way.
+
+The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which signing algorithm and key were used.
+
+The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [RFC 7519](https://datatracker.ietf.org/doc/html/rfc7519) for information about reserved keys and the proper way to add your own.
+
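+As a tiny sketch of that structure, the following uses this library's exported segment helpers (`EncodeSegment`/`DecodeSegment`) to assemble and split an unsigned, illustration-only token; the header and claims JSON are placeholder values:
+
+```
+package main
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/golang-jwt/jwt"
+)
+
+func main() {
+	// Assemble an (unsigned, illustration-only) compact token by hand:
+	// base64url(header) + "." + base64url(claims) + "." + signature
+	header := jwt.EncodeSegment([]byte(`{"alg":"none","typ":"JWT"}`))
+	claims := jwt.EncodeSegment([]byte(`{"sub":"1234"}`))
+	tokenString := header + "." + claims + "."
+
+	// Any compact JWT splits into exactly three segments.
+	parts := strings.Split(tokenString, ".")
+	fmt.Println(len(parts)) // 3
+
+	// The first two segments decode back to plain JSON.
+	headerJSON, err := jwt.DecodeSegment(parts[0])
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("%s\n", headerJSON) // {"alg":"none","typ":"JWT"}
+}
+```
+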
+## What's in the box?
+
+This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own.
+
+## Examples
+
+See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt) for examples of usage:
+
+* [Simple example of parsing and validating a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-Parse-Hmac)
+* [Simple example of building and signing a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-New-Hmac)
+* [Directory of Examples](https://pkg.go.dev/github.com/golang-jwt/jwt#pkg-examples)
+
+## Extensions
+
+This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`.
+
+Here's an example of an extension that integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS): https://github.com/someone1/gcp-jwt-go
+
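+As a rough sketch of registering your own method (the `CUSTOM-HS256` name and the delegation to the built-in HMAC implementation are illustrative assumptions, not an official extension):
+
+```
+package customsigner
+
+import "github.com/golang-jwt/jwt"
+
+// SigningMethodCustom reuses the HMAC-SHA256 implementation under a custom alg name.
+type SigningMethodCustom struct {
+	*jwt.SigningMethodHMAC
+}
+
+// Alg overrides the embedded method so tokens carry the custom name in their header.
+func (m *SigningMethodCustom) Alg() string { return "CUSTOM-HS256" }
+
+func init() {
+	// Make the method discoverable by parsers via GetSigningMethod("CUSTOM-HS256").
+	jwt.RegisterSigningMethod("CUSTOM-HS256", func() jwt.SigningMethod {
+		return &SigningMethodCustom{jwt.SigningMethodHS256}
+	})
+}
+```
+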
+## Compliance
+
+This library was last reviewed to comply with [RFC 7519](https://datatracker.ietf.org/doc/html/rfc7519) dated May 2015 with a few notable differences:
+
+* In order to protect against accidental use of [Unsecured JWTs](https://datatracker.ietf.org/doc/html/rfc7519#section-6), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
+
+## Project Status & Versioning
+
+This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason).
+
+This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `main`. Periodically, versions will be tagged from `main`. You can find all the releases on [the project releases page](https://github.com/golang-jwt/jwt/releases).
+
+While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use the version-pinned alternative import path instead: `gopkg.in/golang-jwt/jwt.v3`. It will do the right thing with respect to semantic versioning.
+
+**BREAKING CHANGES:**
+* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.
+
+## Usage Tips
+
+### Signing vs Encryption
+
+A token is simply a JSON object that is signed by its author. This tells you exactly two things about the data:
+
+* The author of the token was in the possession of the signing secret
+* The data has not been modified since it was signed
+
+It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library.
+
+### Choosing a Signing Method
+
+There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric.
+
+Symmetric signing methods, such as HMAC-SHA, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly faster computationally, though this is rarely enough to matter. Symmetric signing methods work best when both producers and consumers of tokens are trusted, or are even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation.
+
+Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification.
+
+### Signing Methods and Key Types
+
+Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones:
+
+* The [HMAC signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expects `[]byte` values for signing and validation
+* The [RSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expects `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation
+* The [ECDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expects `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation
+
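+As a concrete illustration, here is a minimal sketch of signing and parsing a token with the HMAC family, where the same `[]byte` secret is used on both sides (the secret and claim values are placeholders):
+
+```
+package main
+
+import (
+	"fmt"
+
+	"github.com/golang-jwt/jwt"
+)
+
+func main() {
+	key := []byte("my-hmac-secret") // HS256 expects a []byte key
+
+	// Build and sign a token.
+	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"sub": "1234"})
+	signed, err := token.SignedString(key)
+	if err != nil {
+		panic(err)
+	}
+
+	// Parse and validate it, checking the signing method before handing back the key.
+	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
+		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
+			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
+		}
+		return key, nil
+	})
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(parsed.Valid) // true
+}
+```
+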
+### JWT and OAuth
+
+It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication.
+
+Without going too far down the rabbit hole, here's a description of the interaction of these technologies:
+
+* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth.
+* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token.
+* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL.
+
+### Troubleshooting
+
+This library uses descriptive error messages whenever possible. If you are not getting the expected result, have a look at the errors. The most common place people get stuck is providing the correct type of key to the parser. See the above section on signing methods and key types.
+
+## More
+
+Documentation can be found [on pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt).
+
+The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation.
diff --git a/vendor/github.com/golang-jwt/jwt/VERSION_HISTORY.md b/vendor/github.com/golang-jwt/jwt/VERSION_HISTORY.md
new file mode 100644
index 00000000..637f2ba6
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/VERSION_HISTORY.md
@@ -0,0 +1,131 @@
+## `jwt-go` Version History
+
+#### 3.2.2
+
+* Starting from this release, we are adopting the policy of supporting the 2 most recent versions of Go currently available. At the time of this release, these are Go 1.15 and 1.16 ([#28](https://github.com/golang-jwt/jwt/pull/28)).
+* Fixed a potential issue that could occur when the verification of `exp`, `iat` or `nbf` was not required and contained invalid contents, i.e. non-numeric/date. Thanks to @thaJeztah for making us aware of that and to @giorgos-f3 for originally reporting it to the formtech fork ([#40](https://github.com/golang-jwt/jwt/pull/40)).
+* Added support for EdDSA / ED25519 ([#36](https://github.com/golang-jwt/jwt/pull/36)).
+* Optimized allocations ([#33](https://github.com/golang-jwt/jwt/pull/33)).
+
+#### 3.2.1
+
+* **Import Path Change**: See MIGRATION_GUIDE.md for tips on updating your code
+ * Changed the import path from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt`
+* Fixed a type confusion issue between `string` and `[]string` in `VerifyAudience` ([#12](https://github.com/golang-jwt/jwt/pull/12)). This fixes CVE-2020-26160.
+
+#### 3.2.0
+
+* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation
+* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate
+* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set include `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before.
+* Deprecated `ParseFromRequestWithClaims` to simplify API in the future.
+
+#### 3.1.0
+
+* Improvements to `jwt` command line tool
+* Added `SkipClaimsValidation` option to `Parser`
+* Documentation updates
+
+#### 3.0.0
+
+* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code
+ * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods.
+ * `ParseFromRequest` has been moved to `request` subpackage and usage has changed
+ * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims.
+* Other Additions and Changes
+ * Added `Claims` interface type to allow users to decode the claims into a custom type
+ * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into.
+ * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage
+ * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims`
+ * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`.
+ * Added several new, more specific, validation errors to error type bitmask
+ * Moved examples from README to executable example files
+ * Signing method registry is now thread safe
+ * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser)
+
+#### 2.7.0
+
+This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes.
+
+* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying
+* Error text for expired tokens includes how long it's been expired
+* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM`
+* Documentation updates
+
+#### 2.6.0
+
+* Exposed inner error within ValidationError
+* Fixed validation errors when using UseJSONNumber flag
+* Added several unit tests
+
+#### 2.5.0
+
+* Added support for signing method none. You shouldn't use this. The API tries to make this clear.
+* Updated/fixed some documentation
+* Added more helpful error message when trying to parse tokens that begin with `BEARER `
+
+#### 2.4.0
+
+* Added new type, Parser, to allow for configuration of various parsing parameters
+ * You can now specify a list of valid signing methods. Anything outside this set will be rejected.
+ * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON
+* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go)
+* Fixed some bugs with ECDSA parsing
+
+#### 2.3.0
+
+* Added support for ECDSA signing methods
+* Added support for RSA PSS signing methods (requires go v1.4)
+
+#### 2.2.0
+
+* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic.
+
+#### 2.1.0
+
+Backwards compatible API change that was missed in 2.0.0.
+
+* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte`
+
+#### 2.0.0
+
+There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change.
+
+The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.
+
+It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.
+
+* **Compatibility Breaking Changes**
+ * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct`
+ * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct`
+ * `KeyFunc` now returns `interface{}` instead of `[]byte`
+ * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key
+ * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key
+* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type.
+ * Added public package global `SigningMethodHS256`
+ * Added public package global `SigningMethodHS384`
+ * Added public package global `SigningMethodHS512`
+* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type.
+ * Added public package global `SigningMethodRS256`
+ * Added public package global `SigningMethodRS384`
+ * Added public package global `SigningMethodRS512`
+* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged.
+* Refactored the RSA implementation to be easier to read
+* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM`
+
+#### 1.0.2
+
+* Fixed bug in parsing public keys from certificates
+* Added more tests around the parsing of keys for RS256
+* Code refactoring in RS256 implementation. No functional changes
+
+#### 1.0.1
+
+* Fixed panic if RS256 signing method was passed an invalid key
+
+#### 1.0.0
+
+* First versioned release
+* API stabilized
+* Supports creating, signing, parsing, and validating JWT tokens
+* Supports RS256 and HS256 signing methods
diff --git a/vendor/github.com/golang-jwt/jwt/claims.go b/vendor/github.com/golang-jwt/jwt/claims.go
new file mode 100644
index 00000000..f1dba3cb
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/claims.go
@@ -0,0 +1,146 @@
+package jwt
+
+import (
+ "crypto/subtle"
+ "fmt"
+ "time"
+)
+
+// For a type to be a Claims object, it must just have a Valid method that determines
+// if the token is invalid for any supported reason
+type Claims interface {
+ Valid() error
+}
+
+// Structured version of Claims Section, as referenced at
+// https://tools.ietf.org/html/rfc7519#section-4.1
+// See examples for how to use this with your own claim types
+type StandardClaims struct {
+ Audience string `json:"aud,omitempty"`
+ ExpiresAt int64 `json:"exp,omitempty"`
+ Id string `json:"jti,omitempty"`
+ IssuedAt int64 `json:"iat,omitempty"`
+ Issuer string `json:"iss,omitempty"`
+ NotBefore int64 `json:"nbf,omitempty"`
+ Subject string `json:"sub,omitempty"`
+}
+
+// Validates time based claims "exp, iat, nbf".
+// There is no accounting for clock skew.
+// As well, if any of the above claims are not in the token, it will still
+// be considered a valid claim.
+func (c StandardClaims) Valid() error {
+ vErr := new(ValidationError)
+ now := TimeFunc().Unix()
+
+ // The claims below are optional, by default, so if they are set to the
+ // default value in Go, let's not fail the verification for them.
+ if !c.VerifyExpiresAt(now, false) {
+ delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0))
+ vErr.Inner = fmt.Errorf("token is expired by %v", delta)
+ vErr.Errors |= ValidationErrorExpired
+ }
+
+ if !c.VerifyIssuedAt(now, false) {
+ vErr.Inner = fmt.Errorf("Token used before issued")
+ vErr.Errors |= ValidationErrorIssuedAt
+ }
+
+ if !c.VerifyNotBefore(now, false) {
+ vErr.Inner = fmt.Errorf("token is not valid yet")
+ vErr.Errors |= ValidationErrorNotValidYet
+ }
+
+ if vErr.valid() {
+ return nil
+ }
+
+ return vErr
+}
+
+// Compares the aud claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool {
+ return verifyAud([]string{c.Audience}, cmp, req)
+}
+
+// Compares the exp claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool {
+ return verifyExp(c.ExpiresAt, cmp, req)
+}
+
+// Compares the iat claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool {
+ return verifyIat(c.IssuedAt, cmp, req)
+}
+
+// Compares the iss claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool {
+ return verifyIss(c.Issuer, cmp, req)
+}
+
+// Compares the nbf claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool {
+ return verifyNbf(c.NotBefore, cmp, req)
+}
+
+// ----- helpers
+
+func verifyAud(aud []string, cmp string, required bool) bool {
+ if len(aud) == 0 {
+ return !required
+ }
+ // use a var here to keep constant time compare when looping over a number of claims
+ result := false
+
+ var stringClaims string
+ for _, a := range aud {
+ if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 {
+ result = true
+ }
+ stringClaims = stringClaims + a
+ }
+
+ // case where "" is sent in one or many aud claims
+ if len(stringClaims) == 0 {
+ return !required
+ }
+
+ return result
+}
+
+func verifyExp(exp int64, now int64, required bool) bool {
+ if exp == 0 {
+ return !required
+ }
+ return now <= exp
+}
+
+func verifyIat(iat int64, now int64, required bool) bool {
+ if iat == 0 {
+ return !required
+ }
+ return now >= iat
+}
+
+func verifyIss(iss string, cmp string, required bool) bool {
+ if iss == "" {
+ return !required
+ }
+ if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 {
+ return true
+ } else {
+ return false
+ }
+}
+
+func verifyNbf(nbf int64, now int64, required bool) bool {
+ if nbf == 0 {
+ return !required
+ }
+ return now >= nbf
+}
diff --git a/vendor/github.com/golang-jwt/jwt/doc.go b/vendor/github.com/golang-jwt/jwt/doc.go
new file mode 100644
index 00000000..a86dc1a3
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/doc.go
@@ -0,0 +1,4 @@
+// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html
+//
+// See README.md for more info.
+package jwt
diff --git a/vendor/github.com/golang-jwt/jwt/ecdsa.go b/vendor/github.com/golang-jwt/jwt/ecdsa.go
new file mode 100644
index 00000000..15e23435
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/ecdsa.go
@@ -0,0 +1,142 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/rand"
+ "errors"
+ "math/big"
+)
+
+var (
+ // Sadly this is missing from crypto/ecdsa compared to crypto/rsa
+ ErrECDSAVerification = errors.New("crypto/ecdsa: verification error")
+)
+
+// Implements the ECDSA family of signing methods
+// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification
+type SigningMethodECDSA struct {
+ Name string
+ Hash crypto.Hash
+ KeySize int
+ CurveBits int
+}
+
+// Specific instances for EC256 and company
+var (
+ SigningMethodES256 *SigningMethodECDSA
+ SigningMethodES384 *SigningMethodECDSA
+ SigningMethodES512 *SigningMethodECDSA
+)
+
+func init() {
+ // ES256
+ SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256}
+ RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod {
+ return SigningMethodES256
+ })
+
+ // ES384
+ SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384}
+ RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod {
+ return SigningMethodES384
+ })
+
+ // ES512
+ SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521}
+ RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod {
+ return SigningMethodES512
+ })
+}
+
+func (m *SigningMethodECDSA) Alg() string {
+ return m.Name
+}
+
+// Implements the Verify method from SigningMethod
+// For this verify method, key must be an ecdsa.PublicKey struct
+func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error {
+ var err error
+
+ // Decode the signature
+ var sig []byte
+ if sig, err = DecodeSegment(signature); err != nil {
+ return err
+ }
+
+ // Get the key
+ var ecdsaKey *ecdsa.PublicKey
+ switch k := key.(type) {
+ case *ecdsa.PublicKey:
+ ecdsaKey = k
+ default:
+ return ErrInvalidKeyType
+ }
+
+ if len(sig) != 2*m.KeySize {
+ return ErrECDSAVerification
+ }
+
+ r := big.NewInt(0).SetBytes(sig[:m.KeySize])
+ s := big.NewInt(0).SetBytes(sig[m.KeySize:])
+
+ // Create hasher
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Verify the signature
+ if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus {
+ return nil
+ }
+
+ return ErrECDSAVerification
+}
+
+// Implements the Sign method from SigningMethod
+// For this signing method, key must be an ecdsa.PrivateKey struct
+func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) {
+ // Get the key
+ var ecdsaKey *ecdsa.PrivateKey
+ switch k := key.(type) {
+ case *ecdsa.PrivateKey:
+ ecdsaKey = k
+ default:
+ return "", ErrInvalidKeyType
+ }
+
+ // Create the hasher
+ if !m.Hash.Available() {
+ return "", ErrHashUnavailable
+ }
+
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Sign the string and return r, s
+ if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil {
+ curveBits := ecdsaKey.Curve.Params().BitSize
+
+ if m.CurveBits != curveBits {
+ return "", ErrInvalidKey
+ }
+
+ keyBytes := curveBits / 8
+ if curveBits%8 > 0 {
+ keyBytes += 1
+ }
+
+ // We serialize the outputs (r and s) into big-endian byte arrays
+ // padded with zeros on the left to make sure the sizes work out.
+ // Output must be 2*keyBytes long.
+ out := make([]byte, 2*keyBytes)
+ r.FillBytes(out[0:keyBytes]) // r is assigned to the first half of output.
+ s.FillBytes(out[keyBytes:]) // s is assigned to the second half of output.
+
+ return EncodeSegment(out), nil
+ } else {
+ return "", err
+ }
+}
diff --git a/vendor/github.com/golang-jwt/jwt/ecdsa_utils.go b/vendor/github.com/golang-jwt/jwt/ecdsa_utils.go
new file mode 100644
index 00000000..db9f4be7
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/ecdsa_utils.go
@@ -0,0 +1,69 @@
+package jwt
+
+import (
+ "crypto/ecdsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+)
+
+var (
+ ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key")
+ ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key")
+)
+
+// Parse PEM encoded Elliptic Curve Private Key Structure
+func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil {
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+ return nil, err
+ }
+ }
+
+ var pkey *ecdsa.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
+ return nil, ErrNotECPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// Parse PEM encoded PKCS1 or PKCS8 public key
+func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+ if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+ parsedKey = cert.PublicKey
+ } else {
+ return nil, err
+ }
+ }
+
+ var pkey *ecdsa.PublicKey
+ var ok bool
+ if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
+ return nil, ErrNotECPublicKey
+ }
+
+ return pkey, nil
+}
diff --git a/vendor/github.com/golang-jwt/jwt/ed25519.go b/vendor/github.com/golang-jwt/jwt/ed25519.go
new file mode 100644
index 00000000..a2f8ddbe
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/ed25519.go
@@ -0,0 +1,81 @@
+package jwt
+
+import (
+ "errors"
+
+ "crypto/ed25519"
+)
+
+var (
+ ErrEd25519Verification = errors.New("ed25519: verification error")
+)
+
+// Implements the EdDSA family
+// Expects ed25519.PrivateKey for signing and ed25519.PublicKey for verification
+type SigningMethodEd25519 struct{}
+
+// Specific instance for EdDSA
+var (
+ SigningMethodEdDSA *SigningMethodEd25519
+)
+
+func init() {
+ SigningMethodEdDSA = &SigningMethodEd25519{}
+ RegisterSigningMethod(SigningMethodEdDSA.Alg(), func() SigningMethod {
+ return SigningMethodEdDSA
+ })
+}
+
+func (m *SigningMethodEd25519) Alg() string {
+ return "EdDSA"
+}
+
+// Implements the Verify method from SigningMethod
+// For this verify method, key must be an ed25519.PublicKey
+func (m *SigningMethodEd25519) Verify(signingString, signature string, key interface{}) error {
+ var err error
+ var ed25519Key ed25519.PublicKey
+ var ok bool
+
+ if ed25519Key, ok = key.(ed25519.PublicKey); !ok {
+ return ErrInvalidKeyType
+ }
+
+ if len(ed25519Key) != ed25519.PublicKeySize {
+ return ErrInvalidKey
+ }
+
+ // Decode the signature
+ var sig []byte
+ if sig, err = DecodeSegment(signature); err != nil {
+ return err
+ }
+
+ // Verify the signature
+ if !ed25519.Verify(ed25519Key, []byte(signingString), sig) {
+ return ErrEd25519Verification
+ }
+
+ return nil
+}
+
+// Implements the Sign method from SigningMethod
+// For this signing method, key must be an ed25519.PrivateKey
+func (m *SigningMethodEd25519) Sign(signingString string, key interface{}) (string, error) {
+ var ed25519Key ed25519.PrivateKey
+ var ok bool
+
+ if ed25519Key, ok = key.(ed25519.PrivateKey); !ok {
+ return "", ErrInvalidKeyType
+ }
+
+ // ed25519.Sign panics if private key not equal to ed25519.PrivateKeySize
+ // this allows to avoid recover usage
+ if len(ed25519Key) != ed25519.PrivateKeySize {
+ return "", ErrInvalidKey
+ }
+
+ // Sign the string and return the encoded result
+ sig := ed25519.Sign(ed25519Key, []byte(signingString))
+ return EncodeSegment(sig), nil
+}
diff --git a/vendor/github.com/golang-jwt/jwt/ed25519_utils.go b/vendor/github.com/golang-jwt/jwt/ed25519_utils.go
new file mode 100644
index 00000000..c6357275
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/ed25519_utils.go
@@ -0,0 +1,64 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/ed25519"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+)
+
+var (
+ ErrNotEdPrivateKey = errors.New("Key is not a valid Ed25519 private key")
+ ErrNotEdPublicKey = errors.New("Key is not a valid Ed25519 public key")
+)
+
+// Parse PEM-encoded Edwards curve private key
+func ParseEdPrivateKeyFromPEM(key []byte) (crypto.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+ return nil, err
+ }
+
+ var pkey ed25519.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(ed25519.PrivateKey); !ok {
+ return nil, ErrNotEdPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// Parse PEM-encoded Edwards curve public key
+func ParseEdPublicKeyFromPEM(key []byte) (crypto.PublicKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+ return nil, err
+ }
+
+ var pkey ed25519.PublicKey
+ var ok bool
+ if pkey, ok = parsedKey.(ed25519.PublicKey); !ok {
+ return nil, ErrNotEdPublicKey
+ }
+
+ return pkey, nil
+}
diff --git a/vendor/github.com/golang-jwt/jwt/errors.go b/vendor/github.com/golang-jwt/jwt/errors.go
new file mode 100644
index 00000000..1c93024a
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/errors.go
@@ -0,0 +1,59 @@
+package jwt
+
+import (
+ "errors"
+)
+
+// Error constants
+var (
+ ErrInvalidKey = errors.New("key is invalid")
+ ErrInvalidKeyType = errors.New("key is of invalid type")
+ ErrHashUnavailable = errors.New("the requested hash function is unavailable")
+)
+
+// The errors that might occur when parsing and validating a token
+const (
+ ValidationErrorMalformed uint32 = 1 << iota // Token is malformed
+ ValidationErrorUnverifiable // Token could not be verified because of signing problems
+ ValidationErrorSignatureInvalid // Signature validation failed
+
+ // Standard Claim validation errors
+ ValidationErrorAudience // AUD validation failed
+ ValidationErrorExpired // EXP validation failed
+ ValidationErrorIssuedAt // IAT validation failed
+ ValidationErrorIssuer // ISS validation failed
+ ValidationErrorNotValidYet // NBF validation failed
+ ValidationErrorId // JTI validation failed
+ ValidationErrorClaimsInvalid // Generic claims validation error
+)
+
+// Helper for constructing a ValidationError with a string error message
+func NewValidationError(errorText string, errorFlags uint32) *ValidationError {
+ return &ValidationError{
+ text: errorText,
+ Errors: errorFlags,
+ }
+}
+
+// The error from Parse if token is not valid
+type ValidationError struct {
+ Inner error // stores the error returned by external dependencies, i.e.: KeyFunc
+ Errors uint32 // bitfield. see ValidationError... constants
+ text string // errors that do not have a valid error just have text
+}
+
+// Validation error is an error type
+func (e ValidationError) Error() string {
+ if e.Inner != nil {
+ return e.Inner.Error()
+ } else if e.text != "" {
+ return e.text
+ } else {
+ return "token is invalid"
+ }
+}
+
+// No errors
+func (e *ValidationError) valid() bool {
+ return e.Errors == 0
+}
diff --git a/vendor/github.com/golang-jwt/jwt/hmac.go b/vendor/github.com/golang-jwt/jwt/hmac.go
new file mode 100644
index 00000000..addbe5d4
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/hmac.go
@@ -0,0 +1,95 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/hmac"
+ "errors"
+)
+
+// Implements the HMAC-SHA family of signing methods
+// Expects key type of []byte for both signing and validation
+type SigningMethodHMAC struct {
+ Name string
+ Hash crypto.Hash
+}
+
+// Specific instances for HS256 and company
+var (
+ SigningMethodHS256 *SigningMethodHMAC
+ SigningMethodHS384 *SigningMethodHMAC
+ SigningMethodHS512 *SigningMethodHMAC
+ ErrSignatureInvalid = errors.New("signature is invalid")
+)
+
+func init() {
+ // HS256
+ SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256}
+ RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod {
+ return SigningMethodHS256
+ })
+
+ // HS384
+ SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384}
+ RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod {
+ return SigningMethodHS384
+ })
+
+ // HS512
+ SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512}
+ RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod {
+ return SigningMethodHS512
+ })
+}
+
+func (m *SigningMethodHMAC) Alg() string {
+ return m.Name
+}
+
+// Verify the signature of HSXXX tokens. Returns nil if the signature is valid.
+func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error {
+ // Verify the key is the right type
+ keyBytes, ok := key.([]byte)
+ if !ok {
+ return ErrInvalidKeyType
+ }
+
+ // Decode signature, for comparison
+ sig, err := DecodeSegment(signature)
+ if err != nil {
+ return err
+ }
+
+ // Can we use the specified hashing method?
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+
+ // This signing method is symmetric, so we validate the signature
+ // by reproducing the signature from the signing string and key, then
+ // comparing that against the provided signature.
+ hasher := hmac.New(m.Hash.New, keyBytes)
+ hasher.Write([]byte(signingString))
+ if !hmac.Equal(sig, hasher.Sum(nil)) {
+ return ErrSignatureInvalid
+ }
+
+ // No validation errors. Signature is good.
+ return nil
+}
+
+// Implements the Sign method from SigningMethod for this signing method.
+// Key must be []byte
+func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) {
+ if keyBytes, ok := key.([]byte); ok {
+ if !m.Hash.Available() {
+ return "", ErrHashUnavailable
+ }
+
+ hasher := hmac.New(m.Hash.New, keyBytes)
+ hasher.Write([]byte(signingString))
+
+ return EncodeSegment(hasher.Sum(nil)), nil
+ }
+
+ return "", ErrInvalidKeyType
+}
diff --git a/vendor/github.com/golang-jwt/jwt/map_claims.go b/vendor/github.com/golang-jwt/jwt/map_claims.go
new file mode 100644
index 00000000..72c79f92
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/map_claims.go
@@ -0,0 +1,120 @@
+package jwt
+
+import (
+ "encoding/json"
+ "errors"
+ // "fmt"
+)
+
+// Claims type that uses the map[string]interface{} for JSON decoding
+// This is the default claims type if you don't supply one
+type MapClaims map[string]interface{}
+
+// VerifyAudience Compares the aud claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyAudience(cmp string, req bool) bool {
+ var aud []string
+ switch v := m["aud"].(type) {
+ case string:
+ aud = append(aud, v)
+ case []string:
+ aud = v
+ case []interface{}:
+ for _, a := range v {
+ vs, ok := a.(string)
+ if !ok {
+ return false
+ }
+ aud = append(aud, vs)
+ }
+ }
+ return verifyAud(aud, cmp, req)
+}
+
+// Compares the exp claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool {
+ exp, ok := m["exp"]
+ if !ok {
+ return !req
+ }
+ switch expType := exp.(type) {
+ case float64:
+ return verifyExp(int64(expType), cmp, req)
+ case json.Number:
+ v, _ := expType.Int64()
+ return verifyExp(v, cmp, req)
+ }
+ return false
+}
+
+// Compares the iat claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool {
+ iat, ok := m["iat"]
+ if !ok {
+ return !req
+ }
+ switch iatType := iat.(type) {
+ case float64:
+ return verifyIat(int64(iatType), cmp, req)
+ case json.Number:
+ v, _ := iatType.Int64()
+ return verifyIat(v, cmp, req)
+ }
+ return false
+}
+
+// Compares the iss claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyIssuer(cmp string, req bool) bool {
+ iss, _ := m["iss"].(string)
+ return verifyIss(iss, cmp, req)
+}
+
+// Compares the nbf claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool {
+ nbf, ok := m["nbf"]
+ if !ok {
+ return !req
+ }
+ switch nbfType := nbf.(type) {
+ case float64:
+ return verifyNbf(int64(nbfType), cmp, req)
+ case json.Number:
+ v, _ := nbfType.Int64()
+ return verifyNbf(v, cmp, req)
+ }
+ return false
+}
+
+// Validates time based claims "exp, iat, nbf".
+// There is no accounting for clock skew.
+// As well, if any of the above claims are not in the token, it will still
+// be considered a valid claim.
+func (m MapClaims) Valid() error {
+ vErr := new(ValidationError)
+ now := TimeFunc().Unix()
+
+ if !m.VerifyExpiresAt(now, false) {
+ vErr.Inner = errors.New("Token is expired")
+ vErr.Errors |= ValidationErrorExpired
+ }
+
+ if !m.VerifyIssuedAt(now, false) {
+ vErr.Inner = errors.New("Token used before issued")
+ vErr.Errors |= ValidationErrorIssuedAt
+ }
+
+ if !m.VerifyNotBefore(now, false) {
+ vErr.Inner = errors.New("Token is not valid yet")
+ vErr.Errors |= ValidationErrorNotValidYet
+ }
+
+ if vErr.valid() {
+ return nil
+ }
+
+ return vErr
+}
diff --git a/vendor/github.com/golang-jwt/jwt/none.go b/vendor/github.com/golang-jwt/jwt/none.go
new file mode 100644
index 00000000..f04d189d
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/none.go
@@ -0,0 +1,52 @@
+package jwt
+
+// Implements the none signing method. This is required by the spec
+// but you probably should never use it.
+var SigningMethodNone *signingMethodNone
+
+const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed"
+
+var NoneSignatureTypeDisallowedError error
+
+type signingMethodNone struct{}
+type unsafeNoneMagicConstant string
+
+func init() {
+ SigningMethodNone = &signingMethodNone{}
+ NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid)
+
+ RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod {
+ return SigningMethodNone
+ })
+}
+
+func (m *signingMethodNone) Alg() string {
+ return "none"
+}
+
+// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key
+func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) {
+ // Key must be UnsafeAllowNoneSignatureType to prevent accidentally
+ // accepting 'none' signing method
+ if _, ok := key.(unsafeNoneMagicConstant); !ok {
+ return NoneSignatureTypeDisallowedError
+ }
+ // If signing method is none, signature must be an empty string
+ if signature != "" {
+ return NewValidationError(
+ "'none' signing method with non-empty signature",
+ ValidationErrorSignatureInvalid,
+ )
+ }
+
+ // Accept 'none' signing method.
+ return nil
+}
+
+// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key
+func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) {
+ if _, ok := key.(unsafeNoneMagicConstant); ok {
+ return "", nil
+ }
+ return "", NoneSignatureTypeDisallowedError
+}
diff --git a/vendor/github.com/golang-jwt/jwt/parser.go b/vendor/github.com/golang-jwt/jwt/parser.go
new file mode 100644
index 00000000..d6901d9a
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/parser.go
@@ -0,0 +1,148 @@
+package jwt
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+type Parser struct {
+ ValidMethods []string // If populated, only these methods will be considered valid
+ UseJSONNumber bool // Use JSON Number format in JSON decoder
+ SkipClaimsValidation bool // Skip claims validation during token parsing
+}
+
+// Parse, validate, and return a token.
+// keyFunc will receive the parsed token and should return the key for validating.
+// If everything is kosher, err will be nil
+func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
+ return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc)
+}
+
+func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
+ token, parts, err := p.ParseUnverified(tokenString, claims)
+ if err != nil {
+ return token, err
+ }
+
+ // Verify signing method is in the required set
+ if p.ValidMethods != nil {
+ var signingMethodValid = false
+ var alg = token.Method.Alg()
+ for _, m := range p.ValidMethods {
+ if m == alg {
+ signingMethodValid = true
+ break
+ }
+ }
+ if !signingMethodValid {
+ // signing method is not in the listed set
+ return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid)
+ }
+ }
+
+ // Lookup key
+ var key interface{}
+ if keyFunc == nil {
+ // keyFunc was not provided. short circuiting validation
+ return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable)
+ }
+ if key, err = keyFunc(token); err != nil {
+ // keyFunc returned an error
+ if ve, ok := err.(*ValidationError); ok {
+ return token, ve
+ }
+ return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable}
+ }
+
+ vErr := &ValidationError{}
+
+ // Validate Claims
+ if !p.SkipClaimsValidation {
+ if err := token.Claims.Valid(); err != nil {
+
+ // If the Claims Valid returned an error, check if it is a validation error,
+ // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set
+ if e, ok := err.(*ValidationError); !ok {
+ vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid}
+ } else {
+ vErr = e
+ }
+ }
+ }
+
+ // Perform validation
+ token.Signature = parts[2]
+ if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil {
+ vErr.Inner = err
+ vErr.Errors |= ValidationErrorSignatureInvalid
+ }
+
+ if vErr.valid() {
+ token.Valid = true
+ return token, nil
+ }
+
+ return token, vErr
+}
+
+// WARNING: Don't use this method unless you know what you're doing
+//
+// This method parses the token but doesn't validate the signature. It's only
+// ever useful in cases where you know the signature is valid (because it has
+// been checked previously in the stack) and you want to extract values from
+// it.
+func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) {
+ parts = strings.Split(tokenString, ".")
+ if len(parts) != 3 {
+ return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed)
+ }
+
+ token = &Token{Raw: tokenString}
+
+ // parse Header
+ var headerBytes []byte
+ if headerBytes, err = DecodeSegment(parts[0]); err != nil {
+ if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") {
+ return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed)
+ }
+ return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+ }
+ if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
+ return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+ }
+
+ // parse Claims
+ var claimBytes []byte
+ token.Claims = claims
+
+ if claimBytes, err = DecodeSegment(parts[1]); err != nil {
+ return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+ }
+ dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
+ if p.UseJSONNumber {
+ dec.UseNumber()
+ }
+ // JSON Decode. Special case for map type to avoid weird pointer behavior
+ if c, ok := token.Claims.(MapClaims); ok {
+ err = dec.Decode(&c)
+ } else {
+ err = dec.Decode(&claims)
+ }
+ // Handle decode error
+ if err != nil {
+ return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+ }
+
+ // Lookup signature method
+ if method, ok := token.Header["alg"].(string); ok {
+ if token.Method = GetSigningMethod(method); token.Method == nil {
+ return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable)
+ }
+ } else {
+ return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable)
+ }
+
+ return token, parts, nil
+}
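A short sketch of how the Parser options above are typically combined. It assumes the package's HMAC method (SigningMethodHS256) and a []byte secret, which live in a file vendored elsewhere in this patch rather than in this hunk.

package main

import (
	"fmt"

	"github.com/golang-jwt/jwt"
)

func main() {
	secret := []byte("example-secret") // assumed HMAC key for HS256

	// Issue a token so there is something to parse.
	signed, _ := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"sub": "1234"}).SignedString(secret)

	// Restrict accepted algorithms so a token signed with "none" or any
	// unexpected method is rejected before the key is ever used.
	p := &jwt.Parser{
		ValidMethods:  []string{"HS256"},
		UseJSONNumber: true, // numeric claims decode as json.Number instead of float64
	}
	claims := jwt.MapClaims{}
	tok, err := p.ParseWithClaims(signed, claims, func(t *jwt.Token) (interface{}, error) {
		return secret, nil
	})
	fmt.Println(tok.Valid, claims["sub"], err) // true 1234 <nil>
}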
diff --git a/vendor/github.com/golang-jwt/jwt/rsa.go b/vendor/github.com/golang-jwt/jwt/rsa.go
new file mode 100644
index 00000000..e4caf1ca
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/rsa.go
@@ -0,0 +1,101 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+)
+
+// Implements the RSA family of signing methods.
+// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation
+type SigningMethodRSA struct {
+ Name string
+ Hash crypto.Hash
+}
+
+// Specific instances for RS256 and company
+var (
+ SigningMethodRS256 *SigningMethodRSA
+ SigningMethodRS384 *SigningMethodRSA
+ SigningMethodRS512 *SigningMethodRSA
+)
+
+func init() {
+ // RS256
+ SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256}
+ RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod {
+ return SigningMethodRS256
+ })
+
+ // RS384
+ SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384}
+ RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod {
+ return SigningMethodRS384
+ })
+
+ // RS512
+ SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512}
+ RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod {
+ return SigningMethodRS512
+ })
+}
+
+func (m *SigningMethodRSA) Alg() string {
+ return m.Name
+}
+
+// Implements the Verify method from SigningMethod
+// For this signing method, key must be an *rsa.PublicKey structure.
+func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error {
+ var err error
+
+ // Decode the signature
+ var sig []byte
+ if sig, err = DecodeSegment(signature); err != nil {
+ return err
+ }
+
+ var rsaKey *rsa.PublicKey
+ var ok bool
+
+ if rsaKey, ok = key.(*rsa.PublicKey); !ok {
+ return ErrInvalidKeyType
+ }
+
+ // Create hasher
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Verify the signature
+ return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig)
+}
+
+// Implements the Sign method from SigningMethod
+// For this signing method, key must be an *rsa.PrivateKey structure.
+func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) {
+ var rsaKey *rsa.PrivateKey
+ var ok bool
+
+ // Validate type of key
+ if rsaKey, ok = key.(*rsa.PrivateKey); !ok {
+ return "", ErrInvalidKey
+ }
+
+ // Create the hasher
+ if !m.Hash.Available() {
+ return "", ErrHashUnavailable
+ }
+
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Sign the string and return the encoded bytes
+ if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil {
+ return EncodeSegment(sigBytes), nil
+ } else {
+ return "", err
+ }
+}
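A minimal sign/verify round trip with the RS256 method above; Token, NewWithClaims, SignedString and Parse come from token.go vendored later in this patch. Sign takes the *rsa.PrivateKey, Verify the matching *rsa.PublicKey.

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	"github.com/golang-jwt/jwt"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	// Sign with the private key...
	tok := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{"sub": "1234"})
	signed, err := tok.SignedString(priv)
	if err != nil {
		panic(err)
	}

	// ...and verify with the corresponding public key.
	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		return &priv.PublicKey, nil
	})
	fmt.Println(parsed.Valid, err) // true <nil>
}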
diff --git a/vendor/github.com/golang-jwt/jwt/rsa_pss.go b/vendor/github.com/golang-jwt/jwt/rsa_pss.go
new file mode 100644
index 00000000..c0147086
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/rsa_pss.go
@@ -0,0 +1,142 @@
+// +build go1.4
+
+package jwt
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+)
+
+// Implements the RSAPSS family of signing methods.
+type SigningMethodRSAPSS struct {
+ *SigningMethodRSA
+ Options *rsa.PSSOptions
+	// VerifyOptions is optional. If set, it overrides Options for rsa.VerifyPSS.
+	// Used to accept tokens signed with rsa.PSSSaltLengthAuto, which doesn't follow
+ // https://tools.ietf.org/html/rfc7518#section-3.5 but was used previously.
+ // See https://github.com/dgrijalva/jwt-go/issues/285#issuecomment-437451244 for details.
+ VerifyOptions *rsa.PSSOptions
+}
+
+// Specific instances for RS/PS and company.
+var (
+ SigningMethodPS256 *SigningMethodRSAPSS
+ SigningMethodPS384 *SigningMethodRSAPSS
+ SigningMethodPS512 *SigningMethodRSAPSS
+)
+
+func init() {
+ // PS256
+ SigningMethodPS256 = &SigningMethodRSAPSS{
+ SigningMethodRSA: &SigningMethodRSA{
+ Name: "PS256",
+ Hash: crypto.SHA256,
+ },
+ Options: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthEqualsHash,
+ },
+ VerifyOptions: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ },
+ }
+ RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod {
+ return SigningMethodPS256
+ })
+
+ // PS384
+ SigningMethodPS384 = &SigningMethodRSAPSS{
+ SigningMethodRSA: &SigningMethodRSA{
+ Name: "PS384",
+ Hash: crypto.SHA384,
+ },
+ Options: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthEqualsHash,
+ },
+ VerifyOptions: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ },
+ }
+ RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod {
+ return SigningMethodPS384
+ })
+
+ // PS512
+ SigningMethodPS512 = &SigningMethodRSAPSS{
+ SigningMethodRSA: &SigningMethodRSA{
+ Name: "PS512",
+ Hash: crypto.SHA512,
+ },
+ Options: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthEqualsHash,
+ },
+ VerifyOptions: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ },
+ }
+ RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod {
+ return SigningMethodPS512
+ })
+}
+
+// Implements the Verify method from SigningMethod
+// For this verify method, key must be an rsa.PublicKey struct
+func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error {
+ var err error
+
+ // Decode the signature
+ var sig []byte
+ if sig, err = DecodeSegment(signature); err != nil {
+ return err
+ }
+
+ var rsaKey *rsa.PublicKey
+ switch k := key.(type) {
+ case *rsa.PublicKey:
+ rsaKey = k
+ default:
+ return ErrInvalidKey
+ }
+
+ // Create hasher
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ opts := m.Options
+ if m.VerifyOptions != nil {
+ opts = m.VerifyOptions
+ }
+
+ return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, opts)
+}
+
+// Implements the Sign method from SigningMethod
+// For this signing method, key must be an rsa.PrivateKey struct
+func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) {
+ var rsaKey *rsa.PrivateKey
+
+ switch k := key.(type) {
+ case *rsa.PrivateKey:
+ rsaKey = k
+ default:
+ return "", ErrInvalidKeyType
+ }
+
+ // Create the hasher
+ if !m.Hash.Available() {
+ return "", ErrHashUnavailable
+ }
+
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Sign the string and return the encoded bytes
+ if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil {
+ return EncodeSegment(sigBytes), nil
+ } else {
+ return "", err
+ }
+}
diff --git a/vendor/github.com/golang-jwt/jwt/rsa_utils.go b/vendor/github.com/golang-jwt/jwt/rsa_utils.go
new file mode 100644
index 00000000..14c78c29
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/rsa_utils.go
@@ -0,0 +1,101 @@
+package jwt
+
+import (
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+)
+
+var (
+ ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be a PEM encoded PKCS1 or PKCS8 key")
+ ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key")
+ ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key")
+)
+
+// Parse PEM encoded PKCS1 or PKCS8 private key
+func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+ return nil, err
+ }
+ }
+
+ var pkey *rsa.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+ return nil, ErrNotRSAPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// Parse PEM encoded PKCS1 or PKCS8 private key protected with password
+func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ var parsedKey interface{}
+
+ var blockDecrypted []byte
+ if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil {
+ return nil, err
+ }
+
+ if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil {
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil {
+ return nil, err
+ }
+ }
+
+ var pkey *rsa.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+ return nil, ErrNotRSAPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// Parse PEM encoded PKCS1 or PKCS8 public key
+func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+ if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+ parsedKey = cert.PublicKey
+ } else {
+ return nil, err
+ }
+ }
+
+ var pkey *rsa.PublicKey
+ var ok bool
+ if pkey, ok = parsedKey.(*rsa.PublicKey); !ok {
+ return nil, ErrNotRSAPublicKey
+ }
+
+ return pkey, nil
+}
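A small sketch of loading a key with the PEM helpers above and using it to sign; the "rsa-private.pem" file name is a placeholder for any PEM-encoded PKCS1 or PKCS8 RSA private key.

package main

import (
	"fmt"
	"os"

	"github.com/golang-jwt/jwt"
)

func main() {
	// Placeholder path; any PEM-encoded PKCS1 or PKCS8 RSA private key works.
	pemBytes, err := os.ReadFile("rsa-private.pem")
	if err != nil {
		panic(err)
	}

	key, err := jwt.ParseRSAPrivateKeyFromPEM(pemBytes)
	if err != nil {
		panic(err)
	}

	signed, err := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{"sub": "1234"}).SignedString(key)
	fmt.Println(signed, err)
}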
diff --git a/vendor/github.com/golang-jwt/jwt/signing_method.go b/vendor/github.com/golang-jwt/jwt/signing_method.go
new file mode 100644
index 00000000..ed1f212b
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/signing_method.go
@@ -0,0 +1,35 @@
+package jwt
+
+import (
+ "sync"
+)
+
+var signingMethods = map[string]func() SigningMethod{}
+var signingMethodLock = new(sync.RWMutex)
+
+// Implement SigningMethod to add new methods for signing or verifying tokens.
+type SigningMethod interface {
+ Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid
+ Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error
+ Alg() string // returns the alg identifier for this method (example: 'HS256')
+}
+
+// Register the "alg" name and a factory function for signing method.
+// This is typically done during init() in the method's implementation
+func RegisterSigningMethod(alg string, f func() SigningMethod) {
+ signingMethodLock.Lock()
+ defer signingMethodLock.Unlock()
+
+ signingMethods[alg] = f
+}
+
+// Get a signing method from an "alg" string
+func GetSigningMethod(alg string) (method SigningMethod) {
+ signingMethodLock.RLock()
+ defer signingMethodLock.RUnlock()
+
+ if methodF, ok := signingMethods[alg]; ok {
+ method = methodF()
+ }
+ return
+}
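A sketch of how the registry above is meant to be extended: implement the three-method interface and register a factory in init(), as the RSA methods do. The noopMethod type and the "X-NOOP" algorithm name are made up purely for illustration.

package main

import (
	"fmt"

	"github.com/golang-jwt/jwt"
)

// noopMethod is a hypothetical SigningMethod with an empty signature; it only
// exists to illustrate the registry and must not be used for real tokens.
type noopMethod struct{}

func (noopMethod) Alg() string { return "X-NOOP" }

func (noopMethod) Sign(signingString string, key interface{}) (string, error) {
	return "", nil
}

func (noopMethod) Verify(signingString, signature string, key interface{}) error {
	if signature != "" {
		return jwt.NewValidationError("unexpected signature", jwt.ValidationErrorSignatureInvalid)
	}
	return nil
}

func init() {
	// Register the factory so Parse can find the method by its "alg" value.
	jwt.RegisterSigningMethod("X-NOOP", func() jwt.SigningMethod { return noopMethod{} })
}

func main() {
	fmt.Println(jwt.GetSigningMethod("X-NOOP").Alg()) // X-NOOP
}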
diff --git a/vendor/github.com/golang-jwt/jwt/token.go b/vendor/github.com/golang-jwt/jwt/token.go
new file mode 100644
index 00000000..6b30ced1
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/token.go
@@ -0,0 +1,104 @@
+package jwt
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "strings"
+ "time"
+)
+
+// TimeFunc provides the current time when parsing a token to validate the "exp" claim (expiration time).
+// You can override it to use another time value. This is useful for testing or if your
+// server uses a different time zone than your tokens.
+var TimeFunc = time.Now
+
+// Parse methods use this callback function to supply
+// the key for verification. The function receives the parsed,
+// but unverified Token. This allows you to use properties in the
+// Header of the token (such as `kid`) to identify which key to use.
+type Keyfunc func(*Token) (interface{}, error)
+
+// A JWT Token. Different fields will be used depending on whether you're
+// creating or parsing/verifying a token.
+type Token struct {
+ Raw string // The raw token. Populated when you Parse a token
+ Method SigningMethod // The signing method used or to be used
+ Header map[string]interface{} // The first segment of the token
+ Claims Claims // The second segment of the token
+ Signature string // The third segment of the token. Populated when you Parse a token
+ Valid bool // Is the token valid? Populated when you Parse/Verify a token
+}
+
+// Create a new Token. Takes a signing method
+func New(method SigningMethod) *Token {
+ return NewWithClaims(method, MapClaims{})
+}
+
+func NewWithClaims(method SigningMethod, claims Claims) *Token {
+ return &Token{
+ Header: map[string]interface{}{
+ "typ": "JWT",
+ "alg": method.Alg(),
+ },
+ Claims: claims,
+ Method: method,
+ }
+}
+
+// Get the complete, signed token
+func (t *Token) SignedString(key interface{}) (string, error) {
+ var sig, sstr string
+ var err error
+ if sstr, err = t.SigningString(); err != nil {
+ return "", err
+ }
+ if sig, err = t.Method.Sign(sstr, key); err != nil {
+ return "", err
+ }
+ return strings.Join([]string{sstr, sig}, "."), nil
+}
+
+// Generate the signing string. This is the
+// most expensive part of the whole deal. Unless you
+// need this for something special, just go straight for
+// the SignedString.
+func (t *Token) SigningString() (string, error) {
+ var err error
+ parts := make([]string, 2)
+ for i := range parts {
+ var jsonValue []byte
+ if i == 0 {
+ if jsonValue, err = json.Marshal(t.Header); err != nil {
+ return "", err
+ }
+ } else {
+ if jsonValue, err = json.Marshal(t.Claims); err != nil {
+ return "", err
+ }
+ }
+
+ parts[i] = EncodeSegment(jsonValue)
+ }
+ return strings.Join(parts, "."), nil
+}
+
+// Parse, validate, and return a token.
+// keyFunc will receive the parsed token and should return the key for validating.
+// If everything is kosher, err will be nil
+func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
+ return new(Parser).Parse(tokenString, keyFunc)
+}
+
+func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
+ return new(Parser).ParseWithClaims(tokenString, claims, keyFunc)
+}
+
+// Encode JWT specific base64url encoding with padding stripped
+func EncodeSegment(seg []byte) string {
+ return base64.RawURLEncoding.EncodeToString(seg)
+}
+
+// Decode JWT specific base64url encoding with padding stripped
+func DecodeSegment(seg string) ([]byte, error) {
+ return base64.RawURLEncoding.DecodeString(seg)
+}
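A sketch of the `kid`-based key lookup that the Keyfunc comment above describes; the keysByID map and the auth package name are hypothetical.

package auth

import (
	"crypto/rsa"
	"fmt"

	"github.com/golang-jwt/jwt"
)

// keysByID is a hypothetical key set, e.g. loaded from a JWKS document.
var keysByID = map[string]*rsa.PublicKey{}

// VerifyToken picks the verification key based on the token's "kid" header.
func VerifyToken(tokenString string) (*jwt.Token, error) {
	return jwt.Parse(tokenString, func(t *jwt.Token) (interface{}, error) {
		// The token passed here is parsed but not yet verified, so the header
		// may be used to choose a key but must not be trusted beyond that.
		kid, _ := t.Header["kid"].(string)
		key, ok := keysByID[kid]
		if !ok {
			return nil, fmt.Errorf("no key registered for kid %q", kid)
		}
		return key, nil
	})
}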
diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS
new file mode 100644
index 00000000..15167cd7
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS
new file mode 100644
index 00000000..1c4577e9
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
new file mode 100644
index 00000000..0f646931
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/LICENSE
@@ -0,0 +1,28 @@
+Copyright 2010 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/golang/protobuf/proto/buffer.go b/vendor/github.com/golang/protobuf/proto/buffer.go
new file mode 100644
index 00000000..e810e6fe
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/buffer.go
@@ -0,0 +1,324 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "errors"
+ "fmt"
+
+ "google.golang.org/protobuf/encoding/prototext"
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ WireVarint = 0
+ WireFixed32 = 5
+ WireFixed64 = 1
+ WireBytes = 2
+ WireStartGroup = 3
+ WireEndGroup = 4
+)
+
+// EncodeVarint returns the varint encoded bytes of v.
+func EncodeVarint(v uint64) []byte {
+ return protowire.AppendVarint(nil, v)
+}
+
+// SizeVarint returns the length of the varint encoded bytes of v.
+// This is equal to len(EncodeVarint(v)).
+func SizeVarint(v uint64) int {
+ return protowire.SizeVarint(v)
+}
+
+// DecodeVarint parses a varint encoded integer from b,
+// returning the integer value and the length of the varint.
+// It returns (0, 0) if there is a parse error.
+func DecodeVarint(b []byte) (uint64, int) {
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return 0, 0
+ }
+ return v, n
+}
+
+// Buffer is a buffer for encoding and decoding the protobuf wire format.
+// It may be reused between invocations to reduce memory usage.
+type Buffer struct {
+ buf []byte
+ idx int
+ deterministic bool
+}
+
+// NewBuffer allocates a new Buffer initialized with buf,
+// where the contents of buf are considered the unread portion of the buffer.
+func NewBuffer(buf []byte) *Buffer {
+ return &Buffer{buf: buf}
+}
+
+// SetDeterministic specifies whether to use deterministic serialization.
+//
+// Deterministic serialization guarantees that for a given binary, equal
+// messages will always be serialized to the same bytes. This implies:
+//
+// - Repeated serialization of a message will return the same bytes.
+// - Different processes of the same binary (which may be executing on
+// different machines) will serialize equal messages to the same bytes.
+//
+// Note that the deterministic serialization is NOT canonical across
+// languages. It is not guaranteed to remain stable over time. It is unstable
+// across different builds with schema changes due to unknown fields.
+// Users who need canonical serialization (e.g., persistent storage in a
+// canonical form, fingerprinting, etc.) should define their own
+// canonicalization specification and implement their own serializer rather
+// than relying on this API.
+//
+// If deterministic serialization is requested, map entries will be sorted
+// by keys in lexicographic order. This is an implementation detail and
+// subject to change.
+func (b *Buffer) SetDeterministic(deterministic bool) {
+ b.deterministic = deterministic
+}
+
+// SetBuf sets buf as the internal buffer,
+// where the contents of buf are considered the unread portion of the buffer.
+func (b *Buffer) SetBuf(buf []byte) {
+ b.buf = buf
+ b.idx = 0
+}
+
+// Reset clears the internal buffer of all written and unread data.
+func (b *Buffer) Reset() {
+ b.buf = b.buf[:0]
+ b.idx = 0
+}
+
+// Bytes returns the internal buffer.
+func (b *Buffer) Bytes() []byte {
+ return b.buf
+}
+
+// Unread returns the unread portion of the buffer.
+func (b *Buffer) Unread() []byte {
+ return b.buf[b.idx:]
+}
+
+// Marshal appends the wire-format encoding of m to the buffer.
+func (b *Buffer) Marshal(m Message) error {
+ var err error
+ b.buf, err = marshalAppend(b.buf, m, b.deterministic)
+ return err
+}
+
+// Unmarshal parses the wire-format message in the buffer and
+// places the decoded results in m.
+// It does not reset m before unmarshaling.
+func (b *Buffer) Unmarshal(m Message) error {
+ err := UnmarshalMerge(b.Unread(), m)
+ b.idx = len(b.buf)
+ return err
+}
+
+type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields }
+
+func (m *unknownFields) String() string { panic("not implemented") }
+func (m *unknownFields) Reset() { panic("not implemented") }
+func (m *unknownFields) ProtoMessage() { panic("not implemented") }
+
+// DebugPrint dumps the encoded bytes of b with a header and footer including s
+// to stdout. This is only intended for debugging.
+func (*Buffer) DebugPrint(s string, b []byte) {
+ m := MessageReflect(new(unknownFields))
+ m.SetUnknown(b)
+ b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface())
+ fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s)
+}
+
+// EncodeVarint appends an unsigned varint encoding to the buffer.
+func (b *Buffer) EncodeVarint(v uint64) error {
+ b.buf = protowire.AppendVarint(b.buf, v)
+ return nil
+}
+
+// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer.
+func (b *Buffer) EncodeZigzag32(v uint64) error {
+ return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+}
+
+// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer.
+func (b *Buffer) EncodeZigzag64(v uint64) error {
+ return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63))))
+}
+
+// EncodeFixed32 appends a 32-bit little-endian integer to the buffer.
+func (b *Buffer) EncodeFixed32(v uint64) error {
+ b.buf = protowire.AppendFixed32(b.buf, uint32(v))
+ return nil
+}
+
+// EncodeFixed64 appends a 64-bit little-endian integer to the buffer.
+func (b *Buffer) EncodeFixed64(v uint64) error {
+ b.buf = protowire.AppendFixed64(b.buf, uint64(v))
+ return nil
+}
+
+// EncodeRawBytes appends a length-prefixed raw bytes to the buffer.
+func (b *Buffer) EncodeRawBytes(v []byte) error {
+ b.buf = protowire.AppendBytes(b.buf, v)
+ return nil
+}
+
+// EncodeStringBytes appends a length-prefixed raw bytes to the buffer.
+// It does not validate whether v contains valid UTF-8.
+func (b *Buffer) EncodeStringBytes(v string) error {
+ b.buf = protowire.AppendString(b.buf, v)
+ return nil
+}
+
+// EncodeMessage appends a length-prefixed encoded message to the buffer.
+func (b *Buffer) EncodeMessage(m Message) error {
+ var err error
+ b.buf = protowire.AppendVarint(b.buf, uint64(Size(m)))
+ b.buf, err = marshalAppend(b.buf, m, b.deterministic)
+ return err
+}
+
+// DecodeVarint consumes an encoded unsigned varint from the buffer.
+func (b *Buffer) DecodeVarint() (uint64, error) {
+ v, n := protowire.ConsumeVarint(b.buf[b.idx:])
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ b.idx += n
+ return uint64(v), nil
+}
+
+// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer.
+func (b *Buffer) DecodeZigzag32() (uint64, error) {
+ v, err := b.DecodeVarint()
+ if err != nil {
+ return 0, err
+ }
+ return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil
+}
+
+// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer.
+func (b *Buffer) DecodeZigzag64() (uint64, error) {
+ v, err := b.DecodeVarint()
+ if err != nil {
+ return 0, err
+ }
+ return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil
+}
+
+// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer.
+func (b *Buffer) DecodeFixed32() (uint64, error) {
+ v, n := protowire.ConsumeFixed32(b.buf[b.idx:])
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ b.idx += n
+ return uint64(v), nil
+}
+
+// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer.
+func (b *Buffer) DecodeFixed64() (uint64, error) {
+ v, n := protowire.ConsumeFixed64(b.buf[b.idx:])
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ b.idx += n
+ return uint64(v), nil
+}
+
+// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer.
+// If alloc is specified, it returns a copy of the raw bytes
+// rather than a sub-slice of the buffer.
+func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) {
+ v, n := protowire.ConsumeBytes(b.buf[b.idx:])
+ if n < 0 {
+ return nil, protowire.ParseError(n)
+ }
+ b.idx += n
+ if alloc {
+ v = append([]byte(nil), v...)
+ }
+ return v, nil
+}
+
+// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer.
+// It does not validate whether the raw bytes contain valid UTF-8.
+func (b *Buffer) DecodeStringBytes() (string, error) {
+ v, n := protowire.ConsumeString(b.buf[b.idx:])
+ if n < 0 {
+ return "", protowire.ParseError(n)
+ }
+ b.idx += n
+ return v, nil
+}
+
+// DecodeMessage consumes a length-prefixed message from the buffer.
+// It does not reset m before unmarshaling.
+func (b *Buffer) DecodeMessage(m Message) error {
+ v, err := b.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ return UnmarshalMerge(v, m)
+}
+
+// DecodeGroup consumes a message group from the buffer.
+// It assumes that the start group marker has already been consumed and
+// consumes all bytes until (and including) the end group marker.
+// It does not reset m before unmarshaling.
+func (b *Buffer) DecodeGroup(m Message) error {
+ v, n, err := consumeGroup(b.buf[b.idx:])
+ if err != nil {
+ return err
+ }
+ b.idx += n
+ return UnmarshalMerge(v, m)
+}
+
+// consumeGroup parses b until it finds an end group marker, returning
+// the raw bytes of the message (excluding the end group marker) and
+// the total length of the message (including the end group marker).
+func consumeGroup(b []byte) ([]byte, int, error) {
+ b0 := b
+ depth := 1 // assume this follows a start group marker
+ for {
+ _, wtyp, tagLen := protowire.ConsumeTag(b)
+ if tagLen < 0 {
+ return nil, 0, protowire.ParseError(tagLen)
+ }
+ b = b[tagLen:]
+
+ var valLen int
+ switch wtyp {
+ case protowire.VarintType:
+ _, valLen = protowire.ConsumeVarint(b)
+ case protowire.Fixed32Type:
+ _, valLen = protowire.ConsumeFixed32(b)
+ case protowire.Fixed64Type:
+ _, valLen = protowire.ConsumeFixed64(b)
+ case protowire.BytesType:
+ _, valLen = protowire.ConsumeBytes(b)
+ case protowire.StartGroupType:
+ depth++
+ case protowire.EndGroupType:
+ depth--
+ default:
+ return nil, 0, errors.New("proto: cannot parse reserved wire type")
+ }
+ if valLen < 0 {
+ return nil, 0, protowire.ParseError(valLen)
+ }
+ b = b[valLen:]
+
+ if depth == 0 {
+ return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil
+ }
+ }
+}
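A quick round trip through the (deprecated) v1 Buffer above, shown only to illustrate how writes append to the buffer while the read index still points at the unread portion, so the same Buffer can decode what it just encoded.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	b := proto.NewBuffer(nil)

	// Writes append to the internal buffer; the read index stays at the start.
	_ = b.EncodeVarint(300)
	_ = b.EncodeFixed32(7)
	_ = b.EncodeRawBytes([]byte("hi"))

	// Reads consume from the unread portion in the same order.
	v, _ := b.DecodeVarint()
	f, _ := b.DecodeFixed32()
	raw, _ := b.DecodeRawBytes(false)
	fmt.Println(v, f, string(raw)) // 300 7 hi
}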
diff --git a/vendor/github.com/golang/protobuf/proto/defaults.go b/vendor/github.com/golang/protobuf/proto/defaults.go
new file mode 100644
index 00000000..d399bf06
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/defaults.go
@@ -0,0 +1,63 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// SetDefaults sets unpopulated scalar fields to their default values.
+// Fields within a oneof are not set even if they have a default value.
+// SetDefaults is recursively called upon any populated message fields.
+func SetDefaults(m Message) {
+ if m != nil {
+ setDefaults(MessageReflect(m))
+ }
+}
+
+func setDefaults(m protoreflect.Message) {
+ fds := m.Descriptor().Fields()
+ for i := 0; i < fds.Len(); i++ {
+ fd := fds.Get(i)
+ if !m.Has(fd) {
+ if fd.HasDefault() && fd.ContainingOneof() == nil {
+ v := fd.Default()
+ if fd.Kind() == protoreflect.BytesKind {
+ v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes
+ }
+ m.Set(fd, v)
+ }
+ continue
+ }
+ }
+
+ m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ switch {
+ // Handle singular message.
+ case fd.Cardinality() != protoreflect.Repeated:
+ if fd.Message() != nil {
+ setDefaults(m.Get(fd).Message())
+ }
+ // Handle list of messages.
+ case fd.IsList():
+ if fd.Message() != nil {
+ ls := m.Get(fd).List()
+ for i := 0; i < ls.Len(); i++ {
+ setDefaults(ls.Get(i).Message())
+ }
+ }
+ // Handle map of messages.
+ case fd.IsMap():
+ if fd.MapValue().Message() != nil {
+ ms := m.Get(fd).Map()
+ ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
+ setDefaults(v.Message())
+ return true
+ })
+ }
+ }
+ return true
+ })
+}
diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go
new file mode 100644
index 00000000..e8db57e0
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/deprecated.go
@@ -0,0 +1,113 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strconv"
+
+ protoV2 "google.golang.org/protobuf/proto"
+)
+
+var (
+ // Deprecated: No longer returned.
+ ErrNil = errors.New("proto: Marshal called with nil")
+
+ // Deprecated: No longer returned.
+ ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
+
+ // Deprecated: No longer returned.
+ ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+)
+
+// Deprecated: Do not use.
+type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
+
+// Deprecated: Do not use.
+func GetStats() Stats { return Stats{} }
+
+// Deprecated: Do not use.
+func MarshalMessageSet(interface{}) ([]byte, error) {
+ return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: Do not use.
+func UnmarshalMessageSet([]byte, interface{}) error {
+ return errors.New("proto: not implemented")
+}
+
+// Deprecated: Do not use.
+func MarshalMessageSetJSON(interface{}) ([]byte, error) {
+ return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: Do not use.
+func UnmarshalMessageSetJSON([]byte, interface{}) error {
+ return errors.New("proto: not implemented")
+}
+
+// Deprecated: Do not use.
+func RegisterMessageSetType(Message, int32, string) {}
+
+// Deprecated: Do not use.
+func EnumName(m map[int32]string, v int32) string {
+ s, ok := m[v]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(v))
+}
+
+// Deprecated: Do not use.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+ if data[0] == '"' {
+ // New style: enums are strings.
+ var repr string
+ if err := json.Unmarshal(data, &repr); err != nil {
+ return -1, err
+ }
+ val, ok := m[repr]
+ if !ok {
+ return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+ }
+ return val, nil
+ }
+ // Old style: enums are ints.
+ var val int32
+ if err := json.Unmarshal(data, &val); err != nil {
+ return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+ }
+ return val, nil
+}
+
+// Deprecated: Do not use; this type existed for internal-use only.
+type InternalMessageInfo struct{}
+
+// Deprecated: Do not use; this method existed for internal-use only.
+func (*InternalMessageInfo) DiscardUnknown(m Message) {
+ DiscardUnknown(m)
+}
+
+// Deprecated: Do not use; this method existed for internal-use only.
+func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) {
+ return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m))
+}
+
+// Deprecated: Do not use; this method existed for internal-use only.
+func (*InternalMessageInfo) Merge(dst, src Message) {
+ protoV2.Merge(MessageV2(dst), MessageV2(src))
+}
+
+// Deprecated: Do not use; this method existed for internal-use only.
+func (*InternalMessageInfo) Size(m Message) int {
+ return protoV2.Size(MessageV2(m))
+}
+
+// Deprecated: Do not use; this method existed for internal-use only.
+func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error {
+ return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m))
+}
diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go
new file mode 100644
index 00000000..2187e877
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/discard.go
@@ -0,0 +1,58 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// DiscardUnknown recursively discards all unknown fields from this message
+// and all embedded messages.
+//
+// When unmarshaling a message with unrecognized fields, the tags and values
+// of such fields are preserved in the Message. This allows a later call to
+// marshal to be able to produce a message that continues to have those
+// unrecognized fields. To avoid this, DiscardUnknown is used to
+// explicitly clear the unknown fields after unmarshaling.
+func DiscardUnknown(m Message) {
+ if m != nil {
+ discardUnknown(MessageReflect(m))
+ }
+}
+
+func discardUnknown(m protoreflect.Message) {
+ m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool {
+ switch {
+ // Handle singular message.
+ case fd.Cardinality() != protoreflect.Repeated:
+ if fd.Message() != nil {
+ discardUnknown(m.Get(fd).Message())
+ }
+ // Handle list of messages.
+ case fd.IsList():
+ if fd.Message() != nil {
+ ls := m.Get(fd).List()
+ for i := 0; i < ls.Len(); i++ {
+ discardUnknown(ls.Get(i).Message())
+ }
+ }
+ // Handle map of messages.
+ case fd.IsMap():
+ if fd.MapValue().Message() != nil {
+ ms := m.Get(fd).Map()
+ ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
+ discardUnknown(v.Message())
+ return true
+ })
+ }
+ }
+ return true
+ })
+
+ // Discard unknown fields.
+ if len(m.GetUnknown()) > 0 {
+ m.SetUnknown(nil)
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
new file mode 100644
index 00000000..42fc120c
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -0,0 +1,356 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/runtime/protoiface"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+type (
+ // ExtensionDesc represents an extension descriptor and
+ // is used to interact with an extension field in a message.
+ //
+ // Variables of this type are generated in code by protoc-gen-go.
+ ExtensionDesc = protoimpl.ExtensionInfo
+
+ // ExtensionRange represents a range of message extensions.
+ // Used in code generated by protoc-gen-go.
+ ExtensionRange = protoiface.ExtensionRangeV1
+
+ // Deprecated: Do not use; this is an internal type.
+ Extension = protoimpl.ExtensionFieldV1
+
+ // Deprecated: Do not use; this is an internal type.
+ XXX_InternalExtensions = protoimpl.ExtensionFields
+)
+
+// ErrMissingExtension reports whether the extension was not present.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+var errNotExtendable = errors.New("proto: not an extendable proto.Message")
+
+// HasExtension reports whether the extension field is present in m
+// either as an explicitly populated field or as an unknown field.
+func HasExtension(m Message, xt *ExtensionDesc) (has bool) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
+ return false
+ }
+
+ // Check whether any populated known field matches the field number.
+ xtd := xt.TypeDescriptor()
+ if isValidExtension(mr.Descriptor(), xtd) {
+ has = mr.Has(xtd)
+ } else {
+ mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+ has = int32(fd.Number()) == xt.Field
+ return !has
+ })
+ }
+
+ // Check whether any unknown field matches the field number.
+ for b := mr.GetUnknown(); !has && len(b) > 0; {
+ num, _, n := protowire.ConsumeField(b)
+ has = int32(num) == xt.Field
+ b = b[n:]
+ }
+ return has
+}
+
+// ClearExtension removes the extension field from m
+// either as an explicitly populated field or as an unknown field.
+func ClearExtension(m Message, xt *ExtensionDesc) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
+ return
+ }
+
+ xtd := xt.TypeDescriptor()
+ if isValidExtension(mr.Descriptor(), xtd) {
+ mr.Clear(xtd)
+ } else {
+ mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+ if int32(fd.Number()) == xt.Field {
+ mr.Clear(fd)
+ return false
+ }
+ return true
+ })
+ }
+ clearUnknown(mr, fieldNum(xt.Field))
+}
+
+// ClearAllExtensions clears all extensions from m.
+// This includes populated fields and unknown fields in the extension range.
+func ClearAllExtensions(m Message) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
+ return
+ }
+
+ mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+ if fd.IsExtension() {
+ mr.Clear(fd)
+ }
+ return true
+ })
+ clearUnknown(mr, mr.Descriptor().ExtensionRanges())
+}
+
+// GetExtension retrieves a proto2 extended field from m.
+//
+// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
+// then GetExtension parses the encoded field and returns a Go value of the specified type.
+// If the field is not present, then the default value is returned (if one is specified),
+// otherwise ErrMissingExtension is reported.
+//
+// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil),
+// then GetExtension returns the raw encoded bytes for the extension field.
+func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
+ return nil, errNotExtendable
+ }
+
+ // Retrieve the unknown fields for this extension field.
+ var bo protoreflect.RawFields
+ for bi := mr.GetUnknown(); len(bi) > 0; {
+ num, _, n := protowire.ConsumeField(bi)
+ if int32(num) == xt.Field {
+ bo = append(bo, bi[:n]...)
+ }
+ bi = bi[n:]
+ }
+
+ // For type incomplete descriptors, only retrieve the unknown fields.
+ if xt.ExtensionType == nil {
+ return []byte(bo), nil
+ }
+
+ // If the extension field only exists as unknown fields, unmarshal it.
+ // This is rarely done since proto.Unmarshal eagerly unmarshals extensions.
+ xtd := xt.TypeDescriptor()
+ if !isValidExtension(mr.Descriptor(), xtd) {
+ return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
+ }
+ if !mr.Has(xtd) && len(bo) > 0 {
+ m2 := mr.New()
+ if err := (proto.UnmarshalOptions{
+ Resolver: extensionResolver{xt},
+ }.Unmarshal(bo, m2.Interface())); err != nil {
+ return nil, err
+ }
+ if m2.Has(xtd) {
+ mr.Set(xtd, m2.Get(xtd))
+ clearUnknown(mr, fieldNum(xt.Field))
+ }
+ }
+
+ // Check whether the message has the extension field set or a default.
+ var pv protoreflect.Value
+ switch {
+ case mr.Has(xtd):
+ pv = mr.Get(xtd)
+ case xtd.HasDefault():
+ pv = xtd.Default()
+ default:
+ return nil, ErrMissingExtension
+ }
+
+ v := xt.InterfaceOf(pv)
+ rv := reflect.ValueOf(v)
+ if isScalarKind(rv.Kind()) {
+ rv2 := reflect.New(rv.Type())
+ rv2.Elem().Set(rv)
+ v = rv2.Interface()
+ }
+ return v, nil
+}
+
+// extensionResolver is a custom extension resolver that stores a single
+// extension type that takes precedence over the global registry.
+type extensionResolver struct{ xt protoreflect.ExtensionType }
+
+func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
+ if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field {
+ return r.xt, nil
+ }
+ return protoregistry.GlobalTypes.FindExtensionByName(field)
+}
+
+func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
+ if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field {
+ return r.xt, nil
+ }
+ return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
+}
+
+// GetExtensions returns a list of the extension values present in m,
+// corresponding with the provided list of extension descriptors, xts.
+// If an extension is missing in m, the corresponding value is nil.
+func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
+ return nil, errNotExtendable
+ }
+
+ vs := make([]interface{}, len(xts))
+ for i, xt := range xts {
+ v, err := GetExtension(m, xt)
+ if err != nil {
+ if err == ErrMissingExtension {
+ continue
+ }
+ return vs, err
+ }
+ vs[i] = v
+ }
+ return vs, nil
+}
+
+// SetExtension sets an extension field in m to the provided value.
+func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
+ return errNotExtendable
+ }
+
+ rv := reflect.ValueOf(v)
+ if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) {
+ return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType)
+ }
+ if rv.Kind() == reflect.Ptr {
+ if rv.IsNil() {
+ return fmt.Errorf("proto: SetExtension called with nil value of type %T", v)
+ }
+ if isScalarKind(rv.Elem().Kind()) {
+ v = rv.Elem().Interface()
+ }
+ }
+
+ xtd := xt.TypeDescriptor()
+ if !isValidExtension(mr.Descriptor(), xtd) {
+ return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
+ }
+ mr.Set(xtd, xt.ValueOf(v))
+ clearUnknown(mr, fieldNum(xt.Field))
+ return nil
+}
+
+// SetRawExtension inserts b into the unknown fields of m.
+//
+// Deprecated: Use Message.ProtoReflect.SetUnknown instead.
+func SetRawExtension(m Message, fnum int32, b []byte) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
+ return
+ }
+
+ // Verify that the raw field is valid.
+ for b0 := b; len(b0) > 0; {
+ num, _, n := protowire.ConsumeField(b0)
+ if int32(num) != fnum {
+ panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum))
+ }
+ b0 = b0[n:]
+ }
+
+ ClearExtension(m, &ExtensionDesc{Field: fnum})
+ mr.SetUnknown(append(mr.GetUnknown(), b...))
+}
+
+// ExtensionDescs returns a list of extension descriptors found in m,
+// containing descriptors for both populated extension fields in m and
+// also unknown fields of m that are in the extension range.
+// For the latter case, a type-incomplete descriptor is provided where only
+// the ExtensionDesc.Field field is populated.
+// The order of the extension descriptors is undefined.
+func ExtensionDescs(m Message) ([]*ExtensionDesc, error) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
+ return nil, errNotExtendable
+ }
+
+ // Collect a set of known extension descriptors.
+ extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc)
+ mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ if fd.IsExtension() {
+ xt := fd.(protoreflect.ExtensionTypeDescriptor)
+ if xd, ok := xt.Type().(*ExtensionDesc); ok {
+ extDescs[fd.Number()] = xd
+ }
+ }
+ return true
+ })
+
+ // Collect a set of unknown extension descriptors.
+ extRanges := mr.Descriptor().ExtensionRanges()
+ for b := mr.GetUnknown(); len(b) > 0; {
+ num, _, n := protowire.ConsumeField(b)
+ if extRanges.Has(num) && extDescs[num] == nil {
+ extDescs[num] = nil
+ }
+ b = b[n:]
+ }
+
+ // Transpose the set of descriptors into a list.
+ var xts []*ExtensionDesc
+ for num, xt := range extDescs {
+ if xt == nil {
+ xt = &ExtensionDesc{Field: int32(num)}
+ }
+ xts = append(xts, xt)
+ }
+ return xts, nil
+}
+
+// isValidExtension reports whether xtd is a valid extension descriptor for md.
+func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool {
+ return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number())
+}
+
+// isScalarKind reports whether k is a protobuf scalar kind (except bytes).
+// This function exists for historical reasons since the representation of
+// scalars differs between v1 and v2, where v1 uses *T and v2 uses T.
+func isScalarKind(k reflect.Kind) bool {
+ switch k {
+ case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
+ return true
+ default:
+ return false
+ }
+}
+
+// clearUnknown removes unknown fields from m where remover.Has reports true.
+func clearUnknown(m protoreflect.Message, remover interface {
+ Has(protoreflect.FieldNumber) bool
+}) {
+ var bo protoreflect.RawFields
+ for bi := m.GetUnknown(); len(bi) > 0; {
+ num, _, n := protowire.ConsumeField(bi)
+ if !remover.Has(num) {
+ bo = append(bo, bi[:n]...)
+ }
+ bi = bi[n:]
+ }
+ if bi := m.GetUnknown(); len(bi) != len(bo) {
+ m.SetUnknown(bo)
+ }
+}
+
+type fieldNum protoreflect.FieldNumber
+
+func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool {
+ return protoreflect.FieldNumber(n1) == n2
+}
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
new file mode 100644
index 00000000..dcdc2202
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -0,0 +1,306 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+// StructProperties represents protocol buffer type information for a
+// generated protobuf message in the open-struct API.
+//
+// Deprecated: Do not use.
+type StructProperties struct {
+ // Prop are the properties for each field.
+ //
+ // Fields belonging to a oneof are stored in OneofTypes instead, with a
+ // single Properties representing the parent oneof held here.
+ //
+ // The order of Prop matches the order of fields in the Go struct.
+ // Struct fields that are not related to protobufs have a "XXX_" prefix
+ // in the Properties.Name and must be ignored by the user.
+ Prop []*Properties
+
+ // OneofTypes contains information about the oneof fields in this message.
+ // It is keyed by the protobuf field name.
+ OneofTypes map[string]*OneofProperties
+}
+
+// Properties represents the type information for a protobuf message field.
+//
+// Deprecated: Do not use.
+type Properties struct {
+ // Name is a placeholder name with little meaningful semantic value.
+ // If the name has an "XXX_" prefix, the entire Properties must be ignored.
+ Name string
+ // OrigName is the protobuf field name or oneof name.
+ OrigName string
+ // JSONName is the JSON name for the protobuf field.
+ JSONName string
+ // Enum is a placeholder name for enums.
+ // For historical reasons, this is neither the Go name for the enum,
+ // nor the protobuf name for the enum.
+ Enum string // Deprecated: Do not use.
+ // Weak contains the full name of the weakly referenced message.
+ Weak string
+ // Wire is a string representation of the wire type.
+ Wire string
+ // WireType is the protobuf wire type for the field.
+ WireType int
+ // Tag is the protobuf field number.
+ Tag int
+ // Required reports whether this is a required field.
+ Required bool
+	// Optional reports whether this is an optional field.
+ Optional bool
+ // Repeated reports whether this is a repeated field.
+ Repeated bool
+ // Packed reports whether this is a packed repeated field of scalars.
+ Packed bool
+ // Proto3 reports whether this field operates under the proto3 syntax.
+ Proto3 bool
+ // Oneof reports whether this field belongs within a oneof.
+ Oneof bool
+
+ // Default is the default value in string form.
+ Default string
+ // HasDefault reports whether the field has a default value.
+ HasDefault bool
+
+ // MapKeyProp is the properties for the key field for a map field.
+ MapKeyProp *Properties
+ // MapValProp is the properties for the value field for a map field.
+ MapValProp *Properties
+}
+
+// OneofProperties represents the type information for a protobuf oneof.
+//
+// Deprecated: Do not use.
+type OneofProperties struct {
+ // Type is a pointer to the generated wrapper type for the field value.
+ // This is nil for messages that are not in the open-struct API.
+ Type reflect.Type
+ // Field is the index into StructProperties.Prop for the containing oneof.
+ Field int
+ // Prop is the properties for the field.
+ Prop *Properties
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+ s := p.Wire
+ s += "," + strconv.Itoa(p.Tag)
+ if p.Required {
+ s += ",req"
+ }
+ if p.Optional {
+ s += ",opt"
+ }
+ if p.Repeated {
+ s += ",rep"
+ }
+ if p.Packed {
+ s += ",packed"
+ }
+ s += ",name=" + p.OrigName
+ if p.JSONName != "" {
+ s += ",json=" + p.JSONName
+ }
+ if len(p.Enum) > 0 {
+ s += ",enum=" + p.Enum
+ }
+ if len(p.Weak) > 0 {
+ s += ",weak=" + p.Weak
+ }
+ if p.Proto3 {
+ s += ",proto3"
+ }
+ if p.Oneof {
+ s += ",oneof"
+ }
+ if p.HasDefault {
+ s += ",def=" + p.Default
+ }
+ return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(tag string) {
+ // For example: "bytes,49,opt,name=foo,def=hello!"
+ for len(tag) > 0 {
+ i := strings.IndexByte(tag, ',')
+ if i < 0 {
+ i = len(tag)
+ }
+ switch s := tag[:i]; {
+ case strings.HasPrefix(s, "name="):
+ p.OrigName = s[len("name="):]
+ case strings.HasPrefix(s, "json="):
+ p.JSONName = s[len("json="):]
+ case strings.HasPrefix(s, "enum="):
+ p.Enum = s[len("enum="):]
+ case strings.HasPrefix(s, "weak="):
+ p.Weak = s[len("weak="):]
+ case strings.Trim(s, "0123456789") == "":
+ n, _ := strconv.ParseUint(s, 10, 32)
+ p.Tag = int(n)
+ case s == "opt":
+ p.Optional = true
+ case s == "req":
+ p.Required = true
+ case s == "rep":
+ p.Repeated = true
+ case s == "varint" || s == "zigzag32" || s == "zigzag64":
+ p.Wire = s
+ p.WireType = WireVarint
+ case s == "fixed32":
+ p.Wire = s
+ p.WireType = WireFixed32
+ case s == "fixed64":
+ p.Wire = s
+ p.WireType = WireFixed64
+ case s == "bytes":
+ p.Wire = s
+ p.WireType = WireBytes
+ case s == "group":
+ p.Wire = s
+ p.WireType = WireStartGroup
+ case s == "packed":
+ p.Packed = true
+ case s == "proto3":
+ p.Proto3 = true
+ case s == "oneof":
+ p.Oneof = true
+ case strings.HasPrefix(s, "def="):
+ // The default tag is special in that everything afterwards is the
+ // default regardless of the presence of commas.
+ p.HasDefault = true
+ p.Default, i = tag[len("def="):], len(tag)
+ }
+ tag = strings.TrimPrefix(tag[i:], ",")
+ }
+}
+
+// Init populates the properties from a protocol buffer struct tag.
+//
+// Deprecated: Do not use.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+ p.Name = name
+ p.OrigName = name
+ if tag == "" {
+ return
+ }
+ p.Parse(tag)
+
+ if typ != nil && typ.Kind() == reflect.Map {
+ p.MapKeyProp = new(Properties)
+ p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil)
+ p.MapValProp = new(Properties)
+ p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil)
+ }
+}
+
+var propertiesCache sync.Map // map[reflect.Type]*StructProperties
+
+// GetProperties returns the list of properties for the type represented by t,
+// which must be a generated protocol buffer message in the open-struct API,
+// where protobuf message fields are represented by exported Go struct fields.
+//
+// Deprecated: Use protobuf reflection instead.
+func GetProperties(t reflect.Type) *StructProperties {
+ if p, ok := propertiesCache.Load(t); ok {
+ return p.(*StructProperties)
+ }
+ p, _ := propertiesCache.LoadOrStore(t, newProperties(t))
+ return p.(*StructProperties)
+}
+
+func newProperties(t reflect.Type) *StructProperties {
+ if t.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
+ }
+
+ var hasOneof bool
+ prop := new(StructProperties)
+
+ // Construct a list of properties for each field in the struct.
+ for i := 0; i < t.NumField(); i++ {
+ p := new(Properties)
+ f := t.Field(i)
+ tagField := f.Tag.Get("protobuf")
+ p.Init(f.Type, f.Name, tagField, &f)
+
+ tagOneof := f.Tag.Get("protobuf_oneof")
+ if tagOneof != "" {
+ hasOneof = true
+ p.OrigName = tagOneof
+ }
+
+ // Rename unrelated struct fields with the "XXX_" prefix since so much
+ // user code simply checks for this to exclude special fields.
+ if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") {
+ p.Name = "XXX_" + p.Name
+ p.OrigName = "XXX_" + p.OrigName
+ } else if p.Weak != "" {
+ p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field
+ }
+
+ prop.Prop = append(prop.Prop, p)
+ }
+
+ // Construct a mapping of oneof field names to properties.
+ if hasOneof {
+ var oneofWrappers []interface{}
+ if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok {
+ oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{})
+ }
+ if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok {
+ oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{})
+ }
+ if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok {
+ if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok {
+ oneofWrappers = m.ProtoMessageInfo().OneofWrappers
+ }
+ }
+
+ prop.OneofTypes = make(map[string]*OneofProperties)
+ for _, wrapper := range oneofWrappers {
+ p := &OneofProperties{
+ Type: reflect.ValueOf(wrapper).Type(), // *T
+ Prop: new(Properties),
+ }
+ f := p.Type.Elem().Field(0)
+ p.Prop.Name = f.Name
+ p.Prop.Parse(f.Tag.Get("protobuf"))
+
+ // Determine the struct field that contains this oneof.
+ // Each wrapper is assignable to exactly one parent field.
+ var foundOneof bool
+ for i := 0; i < t.NumField() && !foundOneof; i++ {
+ if p.Type.AssignableTo(t.Field(i).Type) {
+ p.Field = i
+ foundOneof = true
+ }
+ }
+ if !foundOneof {
+ panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
+ }
+ prop.OneofTypes[p.Prop.OrigName] = p
+ }
+ }
+
+ return prop
+}
+
+func (sp *StructProperties) Len() int { return len(sp.Prop) }
+func (sp *StructProperties) Less(i, j int) bool { return false }
+func (sp *StructProperties) Swap(i, j int) { return }
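For orientation, a minimal sketch of how the deprecated GetProperties/Parse API vendored above reads `protobuf:"..."` struct tags. The Example struct and its tags are hypothetical stand-ins for protoc-gen-go output:

package main

import (
	"fmt"
	"reflect"

	"github.com/golang/protobuf/proto"
)

// Example is a hand-written struct mimicking a generated message in the
// open-struct API; the field tags are illustrative.
type Example struct {
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	Id   int32  `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"`
}

func main() {
	sp := proto.GetProperties(reflect.TypeOf(Example{}))
	for _, p := range sp.Prop {
		// Parse populated these from the struct tags above.
		fmt.Printf("field=%s tag=%d wire=%s\n", p.OrigName, p.Tag, p.Wire)
	}
}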
diff --git a/vendor/github.com/golang/protobuf/proto/proto.go b/vendor/github.com/golang/protobuf/proto/proto.go
new file mode 100644
index 00000000..5aee89c3
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/proto.go
@@ -0,0 +1,167 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package proto provides functionality for handling protocol buffer messages.
+// In particular, it provides marshaling and unmarshaling between a protobuf
+// message and the binary wire format.
+//
+// See https://developers.google.com/protocol-buffers/docs/gotutorial for
+// more information.
+//
+// Deprecated: Use the "google.golang.org/protobuf/proto" package instead.
+package proto
+
+import (
+ protoV2 "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ ProtoPackageIsVersion1 = true
+ ProtoPackageIsVersion2 = true
+ ProtoPackageIsVersion3 = true
+ ProtoPackageIsVersion4 = true
+)
+
+// GeneratedEnum is any enum type generated by protoc-gen-go
+// which is a named int32 kind.
+// This type exists for documentation purposes.
+type GeneratedEnum interface{}
+
+// GeneratedMessage is any message type generated by protoc-gen-go
+// which is a pointer to a named struct kind.
+// This type exists for documentation purposes.
+type GeneratedMessage interface{}
+
+// Message is a protocol buffer message.
+//
+// This is the v1 version of the message interface and is marginally better
+// than an empty interface as it lacks any method to programmatically interact
+// with the contents of the message.
+//
+// A v2 message is declared in "google.golang.org/protobuf/proto".Message and
+// exposes protobuf reflection as a first-class feature of the interface.
+//
+// To convert a v1 message to a v2 message, use the MessageV2 function.
+// To convert a v2 message to a v1 message, use the MessageV1 function.
+type Message = protoiface.MessageV1
+
+// MessageV1 converts either a v1 or v2 message to a v1 message.
+// It returns nil if m is nil.
+func MessageV1(m GeneratedMessage) protoiface.MessageV1 {
+ return protoimpl.X.ProtoMessageV1Of(m)
+}
+
+// MessageV2 converts either a v1 or v2 message to a v2 message.
+// It returns nil if m is nil.
+func MessageV2(m GeneratedMessage) protoV2.Message {
+ return protoimpl.X.ProtoMessageV2Of(m)
+}
+
+// MessageReflect returns a reflective view for a message.
+// It returns nil if m is nil.
+func MessageReflect(m Message) protoreflect.Message {
+ return protoimpl.X.MessageOf(m)
+}
+
+// Marshaler is implemented by messages that can marshal themselves.
+// This interface is used by the following functions: Size, Marshal,
+// Buffer.Marshal, and Buffer.EncodeMessage.
+//
+// Deprecated: Do not implement.
+type Marshaler interface {
+ // Marshal formats the encoded bytes of the message.
+ // It should be deterministic and emit valid protobuf wire data.
+ // The caller takes ownership of the returned buffer.
+ Marshal() ([]byte, error)
+}
+
+// Unmarshaler is implemented by messages that can unmarshal themselves.
+// This interface is used by the following functions: Unmarshal, UnmarshalMerge,
+// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup.
+//
+// Deprecated: Do not implement.
+type Unmarshaler interface {
+ // Unmarshal parses the encoded bytes of the protobuf wire input.
+	// The provided buffer is only valid during the method call.
+ // It should not reset the receiver message.
+ Unmarshal([]byte) error
+}
+
+// Merger is implemented by messages that can merge themselves.
+// This interface is used by the following functions: Clone and Merge.
+//
+// Deprecated: Do not implement.
+type Merger interface {
+ // Merge merges the contents of src into the receiver message.
+ // It clones all data structures in src such that it aliases no mutable
+ // memory referenced by src.
+ Merge(src Message)
+}
+
+// RequiredNotSetError is an error type returned when
+// marshaling or unmarshaling a message with missing required fields.
+type RequiredNotSetError struct {
+ err error
+}
+
+func (e *RequiredNotSetError) Error() string {
+ if e.err != nil {
+ return e.err.Error()
+ }
+ return "proto: required field not set"
+}
+func (e *RequiredNotSetError) RequiredNotSet() bool {
+ return true
+}
+
+func checkRequiredNotSet(m protoV2.Message) error {
+ if err := protoV2.CheckInitialized(m); err != nil {
+ return &RequiredNotSetError{err: err}
+ }
+ return nil
+}
+
+// Clone returns a deep copy of src.
+func Clone(src Message) Message {
+ return MessageV1(protoV2.Clone(MessageV2(src)))
+}
+
+// Merge merges src into dst, which must be messages of the same type.
+//
+// Populated scalar fields in src are copied to dst, while populated
+// singular messages in src are merged into dst by recursively calling Merge.
+// The elements of every list field in src are appended to the corresponding
+// list field in dst. The entries of every map field in src are copied into
+// the corresponding map field in dst, possibly replacing existing entries.
+// The unknown fields of src are appended to the unknown fields of dst.
+func Merge(dst, src Message) {
+ protoV2.Merge(MessageV2(dst), MessageV2(src))
+}
+
+// Equal reports whether two messages are equal.
+// If two messages marshal to the same bytes under deterministic serialization,
+// then Equal is guaranteed to report true.
+//
+// Two messages are equal if they are the same protobuf message type,
+// have the same set of populated known and extension field values,
+// and the same set of unknown field values.
+//
+// Scalar values are compared with the equivalent of the == operator in Go,
+// except bytes values which are compared using bytes.Equal and
+// floating point values which specially treat NaNs as equal.
+// Message values are compared by recursively calling Equal.
+// Lists are equal if each element value is also equal.
+// Maps are equal if they have the same set of keys, where the pair of values
+// for each key is also equal.
+func Equal(x, y Message) bool {
+ return protoV2.Equal(MessageV2(x), MessageV2(y))
+}
+
+func isMessageSet(md protoreflect.MessageDescriptor) bool {
+ ms, ok := md.(interface{ IsMessageSet() bool })
+ return ok && ms.IsMessageSet()
+}
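A hedged sketch of the Clone/Merge/Equal helpers defined above, assuming a generated message type such as wrapperspb.StringValue (from google.golang.org/protobuf/types/known/wrapperspb) is available to the build:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb" // assumed available alongside the vendored modules
)

func main() {
	a := wrapperspb.String("hello")
	b := proto.Clone(a).(*wrapperspb.StringValue) // deep copy via the v2 runtime

	fmt.Println(proto.Equal(a, b)) // true

	b.Value = "world"
	proto.Merge(a, b)         // populated scalar fields in src overwrite dst
	fmt.Println(a.GetValue()) // world
}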
diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go
new file mode 100644
index 00000000..066b4323
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/registry.go
@@ -0,0 +1,317 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io/ioutil"
+ "reflect"
+ "strings"
+ "sync"
+
+ "google.golang.org/protobuf/reflect/protodesc"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+// filePath is the path to the proto source file.
+type filePath = string // e.g., "google/protobuf/descriptor.proto"
+
+// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto.
+type fileDescGZIP = []byte
+
+var fileCache sync.Map // map[filePath]fileDescGZIP
+
+// RegisterFile is called from generated code to register the compressed
+// FileDescriptorProto with the file path for a proto source file.
+//
+// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead.
+func RegisterFile(s filePath, d fileDescGZIP) {
+ // Decompress the descriptor.
+ zr, err := gzip.NewReader(bytes.NewReader(d))
+ if err != nil {
+ panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
+ }
+ b, err := ioutil.ReadAll(zr)
+ if err != nil {
+ panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
+ }
+
+ // Construct a protoreflect.FileDescriptor from the raw descriptor.
+ // Note that DescBuilder.Build automatically registers the constructed
+ // file descriptor with the v2 registry.
+ protoimpl.DescBuilder{RawDescriptor: b}.Build()
+
+ // Locally cache the raw descriptor form for the file.
+ fileCache.Store(s, d)
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto given the file path
+// for a proto source file. It returns nil if not found.
+//
+// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead.
+func FileDescriptor(s filePath) fileDescGZIP {
+ if v, ok := fileCache.Load(s); ok {
+ return v.(fileDescGZIP)
+ }
+
+ // Find the descriptor in the v2 registry.
+ var b []byte
+ if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil {
+ b, _ = Marshal(protodesc.ToFileDescriptorProto(fd))
+ }
+
+ // Locally cache the raw descriptor form for the file.
+ if len(b) > 0 {
+ v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b))
+ return v.(fileDescGZIP)
+ }
+ return nil
+}
+
+// enumName is the name of an enum. For historical reasons, the enum name is
+// neither the full Go name nor the full protobuf name of the enum.
+// The name is the dot-separated combination of just the proto package that the
+// enum is declared within followed by the Go type name of the generated enum.
+type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum"
+
+// enumsByName maps enum values by name to their numeric counterpart.
+type enumsByName = map[string]int32
+
+// enumsByNumber maps enum values by number to their name counterpart.
+type enumsByNumber = map[int32]string
+
+var enumCache sync.Map // map[enumName]enumsByName
+var numFilesCache sync.Map // map[protoreflect.FullName]int
+
+// RegisterEnum is called from the generated code to register the mapping of
+// enum value names to enum numbers for the enum identified by s.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead.
+func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) {
+ if _, ok := enumCache.Load(s); ok {
+ panic("proto: duplicate enum registered: " + s)
+ }
+ enumCache.Store(s, m)
+
+ // This does not forward registration to the v2 registry since this API
+ // lacks sufficient information to construct a complete v2 enum descriptor.
+}
+
+// EnumValueMap returns the mapping from enum value names to enum numbers for
+// the enum of the given name. It returns nil if not found.
+//
+// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead.
+func EnumValueMap(s enumName) enumsByName {
+ if v, ok := enumCache.Load(s); ok {
+ return v.(enumsByName)
+ }
+
+ // Check whether the cache is stale. If the number of files in the current
+ // package differs, then it means that some enums may have been recently
+ // registered upstream that we do not know about.
+ var protoPkg protoreflect.FullName
+ if i := strings.LastIndexByte(s, '.'); i >= 0 {
+ protoPkg = protoreflect.FullName(s[:i])
+ }
+ v, _ := numFilesCache.Load(protoPkg)
+ numFiles, _ := v.(int)
+ if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles {
+ return nil // cache is up-to-date; was not found earlier
+ }
+
+ // Update the enum cache for all enums declared in the given proto package.
+ numFiles = 0
+ protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool {
+ walkEnums(fd, func(ed protoreflect.EnumDescriptor) {
+ name := protoimpl.X.LegacyEnumName(ed)
+ if _, ok := enumCache.Load(name); !ok {
+ m := make(enumsByName)
+ evs := ed.Values()
+ for i := evs.Len() - 1; i >= 0; i-- {
+ ev := evs.Get(i)
+ m[string(ev.Name())] = int32(ev.Number())
+ }
+ enumCache.LoadOrStore(name, m)
+ }
+ })
+ numFiles++
+ return true
+ })
+ numFilesCache.Store(protoPkg, numFiles)
+
+ // Check cache again for enum map.
+ if v, ok := enumCache.Load(s); ok {
+ return v.(enumsByName)
+ }
+ return nil
+}
+
+// walkEnums recursively walks all enums declared in d.
+func walkEnums(d interface {
+ Enums() protoreflect.EnumDescriptors
+ Messages() protoreflect.MessageDescriptors
+}, f func(protoreflect.EnumDescriptor)) {
+ eds := d.Enums()
+ for i := eds.Len() - 1; i >= 0; i-- {
+ f(eds.Get(i))
+ }
+ mds := d.Messages()
+ for i := mds.Len() - 1; i >= 0; i-- {
+ walkEnums(mds.Get(i), f)
+ }
+}
+
+// messageName is the full name of protobuf message.
+type messageName = string
+
+var messageTypeCache sync.Map // map[messageName]reflect.Type
+
+// RegisterType is called from generated code to register the message Go type
+// for a message of the given name.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead.
+func RegisterType(m Message, s messageName) {
+ mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s))
+ if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil {
+ panic(err)
+ }
+ messageTypeCache.Store(s, reflect.TypeOf(m))
+}
+
+// RegisterMapType is called from generated code to register the Go map type
+// for a protobuf message representing a map entry.
+//
+// Deprecated: Do not use.
+func RegisterMapType(m interface{}, s messageName) {
+ t := reflect.TypeOf(m)
+ if t.Kind() != reflect.Map {
+ panic(fmt.Sprintf("invalid map kind: %v", t))
+ }
+ if _, ok := messageTypeCache.Load(s); ok {
+ panic(fmt.Errorf("proto: duplicate proto message registered: %s", s))
+ }
+ messageTypeCache.Store(s, t)
+}
+
+// MessageType returns the message type for a named message.
+// It returns nil if not found.
+//
+// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead.
+func MessageType(s messageName) reflect.Type {
+ if v, ok := messageTypeCache.Load(s); ok {
+ return v.(reflect.Type)
+ }
+
+ // Derive the message type from the v2 registry.
+ var t reflect.Type
+ if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil {
+ t = messageGoType(mt)
+ }
+
+ // If we could not get a concrete type, it is possible that it is a
+ // pseudo-message for a map entry.
+ if t == nil {
+ d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s))
+ if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() {
+ kt := goTypeForField(md.Fields().ByNumber(1))
+ vt := goTypeForField(md.Fields().ByNumber(2))
+ t = reflect.MapOf(kt, vt)
+ }
+ }
+
+ // Locally cache the message type for the given name.
+ if t != nil {
+ v, _ := messageTypeCache.LoadOrStore(s, t)
+ return v.(reflect.Type)
+ }
+ return nil
+}
+
+func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type {
+ switch k := fd.Kind(); k {
+ case protoreflect.EnumKind:
+ if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil {
+ return enumGoType(et)
+ }
+ return reflect.TypeOf(protoreflect.EnumNumber(0))
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil {
+ return messageGoType(mt)
+ }
+ return reflect.TypeOf((*protoreflect.Message)(nil)).Elem()
+ default:
+ return reflect.TypeOf(fd.Default().Interface())
+ }
+}
+
+func enumGoType(et protoreflect.EnumType) reflect.Type {
+ return reflect.TypeOf(et.New(0))
+}
+
+func messageGoType(mt protoreflect.MessageType) reflect.Type {
+ return reflect.TypeOf(MessageV1(mt.Zero().Interface()))
+}
+
+// MessageName returns the full protobuf name for the given message type.
+//
+// Deprecated: Use protoreflect.MessageDescriptor.FullName instead.
+func MessageName(m Message) messageName {
+ if m == nil {
+ return ""
+ }
+ if m, ok := m.(interface{ XXX_MessageName() messageName }); ok {
+ return m.XXX_MessageName()
+ }
+ return messageName(protoimpl.X.MessageDescriptorOf(m).FullName())
+}
+
+// RegisterExtension is called from the generated code to register
+// the extension descriptor.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead.
+func RegisterExtension(d *ExtensionDesc) {
+ if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil {
+ panic(err)
+ }
+}
+
+type extensionsByNumber = map[int32]*ExtensionDesc
+
+var extensionCache sync.Map // map[messageName]extensionsByNumber
+
+// RegisteredExtensions returns a map of the registered extensions for the
+// provided protobuf message, indexed by the extension field number.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead.
+func RegisteredExtensions(m Message) extensionsByNumber {
+ // Check whether the cache is stale. If the number of extensions for
+ // the given message differs, then it means that some extensions were
+ // recently registered upstream that we do not know about.
+ s := MessageName(m)
+ v, _ := extensionCache.Load(s)
+ xs, _ := v.(extensionsByNumber)
+ if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) {
+ return xs // cache is up-to-date
+ }
+
+ // Cache is stale, re-compute the extensions map.
+ xs = make(extensionsByNumber)
+ protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool {
+ if xd, ok := xt.(*ExtensionDesc); ok {
+ xs[int32(xt.TypeDescriptor().Number())] = xd
+ } else {
+ // TODO: This implies that the protoreflect.ExtensionType is a
+ // custom type not generated by protoc-gen-go. We could try and
+ // convert the type to an ExtensionDesc.
+ }
+ return true
+ })
+ extensionCache.Store(s, xs)
+ return xs
+}
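An illustrative sketch of the registry fallthrough above: MessageType and MessageName consult the v2 global registry when the local caches miss. It assumes google.golang.org/protobuf/types/known/durationpb is linked into the binary so that google.protobuf.Duration is registered:

package main

import (
	"fmt"
	"reflect"

	"github.com/golang/protobuf/proto"
	_ "google.golang.org/protobuf/types/known/durationpb" // registers google.protobuf.Duration
)

func main() {
	// The local cache has no entry, so MessageType falls through to
	// protoregistry.GlobalTypes and caches the result.
	t := proto.MessageType("google.protobuf.Duration")
	fmt.Println(t) // *durationpb.Duration

	m := reflect.New(t.Elem()).Interface().(proto.Message)
	fmt.Println(proto.MessageName(m)) // google.protobuf.Duration
}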
diff --git a/vendor/github.com/golang/protobuf/proto/text_decode.go b/vendor/github.com/golang/protobuf/proto/text_decode.go
new file mode 100644
index 00000000..47eb3e44
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_decode.go
@@ -0,0 +1,801 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/encoding/prototext"
+ protoV2 "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+const wrapTextUnmarshalV2 = false
+
+// ParseError is returned by UnmarshalText.
+type ParseError struct {
+ Message string
+
+ // Deprecated: Do not use.
+ Line, Offset int
+}
+
+func (e *ParseError) Error() string {
+ if wrapTextUnmarshalV2 {
+ return e.Message
+ }
+ if e.Line == 1 {
+ return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message)
+ }
+ return fmt.Sprintf("line %d: %v", e.Line, e.Message)
+}
+
+// UnmarshalText parses a proto text formatted string into m.
+func UnmarshalText(s string, m Message) error {
+ if u, ok := m.(encoding.TextUnmarshaler); ok {
+ return u.UnmarshalText([]byte(s))
+ }
+
+ m.Reset()
+ mi := MessageV2(m)
+
+ if wrapTextUnmarshalV2 {
+ err := prototext.UnmarshalOptions{
+ AllowPartial: true,
+ }.Unmarshal([]byte(s), mi)
+ if err != nil {
+ return &ParseError{Message: err.Error()}
+ }
+ return checkRequiredNotSet(mi)
+ } else {
+ if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil {
+ return err
+ }
+ return checkRequiredNotSet(mi)
+ }
+}
+
+type textParser struct {
+ s string // remaining input
+ done bool // whether the parsing is finished (success or error)
+ backed bool // whether back() was called
+ offset, line int
+ cur token
+}
+
+type token struct {
+ value string
+ err *ParseError
+ line int // line number
+ offset int // byte number from start of input, not start of line
+ unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func newTextParser(s string) *textParser {
+ p := new(textParser)
+ p.s = s
+ p.line = 1
+ p.cur.line = 1
+ return p
+}
+
+func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) {
+ md := m.Descriptor()
+ fds := md.Fields()
+
+ // A struct is a sequence of "name: value", terminated by one of
+ // '>' or '}', or the end of the input. A name may also be
+ // "[extension]" or "[type/url]".
+ //
+ // The whole struct can also be an expanded Any message, like:
+ // [type/url] < ... struct contents ... >
+ seen := make(map[protoreflect.FieldNumber]bool)
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ if tok.value == "[" {
+ if err := p.unmarshalExtensionOrAny(m, seen); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // This is a normal, non-extension field.
+ name := protoreflect.Name(tok.value)
+ fd := fds.ByName(name)
+ switch {
+ case fd == nil:
+ gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name))))
+ if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name {
+ fd = gd
+ }
+ case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name:
+ fd = nil
+ case fd.IsWeak() && fd.Message().IsPlaceholder():
+ fd = nil
+ }
+ if fd == nil {
+ typeName := string(md.FullName())
+ if m, ok := m.Interface().(Message); ok {
+ t := reflect.TypeOf(m)
+ if t.Kind() == reflect.Ptr {
+ typeName = t.Elem().String()
+ }
+ }
+ return p.errorf("unknown field name %q in %v", name, typeName)
+ }
+ if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil {
+ return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name())
+ }
+ if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] {
+ return p.errorf("non-repeated field %q was repeated", fd.Name())
+ }
+ seen[fd.Number()] = true
+
+ // Consume any colon.
+ if err := p.checkForColon(fd); err != nil {
+ return err
+ }
+
+ // Parse into the field.
+ v := m.Get(fd)
+ if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
+ v = m.Mutable(fd)
+ }
+ if v, err = p.unmarshalValue(v, fd); err != nil {
+ return err
+ }
+ m.Set(fd, v)
+
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error {
+ name, err := p.consumeExtensionOrAnyName()
+ if err != nil {
+ return err
+ }
+
+ // If it contains a slash, it's an Any type URL.
+ if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ // consume an optional colon
+ if tok.value == ":" {
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ }
+
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+
+ mt, err := protoregistry.GlobalTypes.FindMessageByURL(name)
+ if err != nil {
+ return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):])
+ }
+ m2 := mt.New()
+ if err := p.unmarshalMessage(m2, terminator); err != nil {
+ return err
+ }
+ b, err := protoV2.Marshal(m2.Interface())
+ if err != nil {
+ return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err)
+ }
+
+ urlFD := m.Descriptor().Fields().ByName("type_url")
+ valFD := m.Descriptor().Fields().ByName("value")
+ if seen[urlFD.Number()] {
+ return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name())
+ }
+ if seen[valFD.Number()] {
+ return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name())
+ }
+ m.Set(urlFD, protoreflect.ValueOfString(name))
+ m.Set(valFD, protoreflect.ValueOfBytes(b))
+ seen[urlFD.Number()] = true
+ seen[valFD.Number()] = true
+ return nil
+ }
+
+ xname := protoreflect.FullName(name)
+ xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
+ if xt == nil && isMessageSet(m.Descriptor()) {
+ xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
+ }
+ if xt == nil {
+ return p.errorf("unrecognized extension %q", name)
+ }
+ fd := xt.TypeDescriptor()
+ if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
+ return p.errorf("extension field %q does not extend message %q", name, m.Descriptor().FullName())
+ }
+
+ if err := p.checkForColon(fd); err != nil {
+ return err
+ }
+
+ v := m.Get(fd)
+ if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
+ v = m.Mutable(fd)
+ }
+ v, err = p.unmarshalValue(v, fd)
+ if err != nil {
+ return err
+ }
+ m.Set(fd, v)
+ return p.consumeOptionalSeparator()
+}
+
+func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return v, tok.err
+ }
+ if tok.value == "" {
+ return v, p.errorf("unexpected EOF")
+ }
+
+ switch {
+ case fd.IsList():
+ lv := v.List()
+ var err error
+ if tok.value == "[" {
+ // Repeated field with list notation, like [1,2,3].
+ for {
+ vv := lv.NewElement()
+ vv, err = p.unmarshalSingularValue(vv, fd)
+ if err != nil {
+ return v, err
+ }
+ lv.Append(vv)
+
+ tok := p.next()
+ if tok.err != nil {
+ return v, tok.err
+ }
+ if tok.value == "]" {
+ break
+ }
+ if tok.value != "," {
+ return v, p.errorf("Expected ']' or ',' found %q", tok.value)
+ }
+ }
+ return v, nil
+ }
+
+ // One value of the repeated field.
+ p.back()
+ vv := lv.NewElement()
+ vv, err = p.unmarshalSingularValue(vv, fd)
+ if err != nil {
+ return v, err
+ }
+ lv.Append(vv)
+ return v, nil
+ case fd.IsMap():
+ // The map entry should be this sequence of tokens:
+ // < key : KEY value : VALUE >
+ // However, implementations may omit key or value, and technically
+ // we should support them in any order.
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return v, p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+
+ keyFD := fd.MapKey()
+ valFD := fd.MapValue()
+
+ mv := v.Map()
+ kv := keyFD.Default()
+ vv := mv.NewValue()
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return v, tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ var err error
+ switch tok.value {
+ case "key":
+ if err := p.consumeToken(":"); err != nil {
+ return v, err
+ }
+ if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil {
+ return v, err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return v, err
+ }
+ case "value":
+ if err := p.checkForColon(valFD); err != nil {
+ return v, err
+ }
+ if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil {
+ return v, err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return v, err
+ }
+ default:
+ p.back()
+ return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
+ }
+ }
+ mv.Set(kv.MapKey(), vv)
+ return v, nil
+ default:
+ p.back()
+ return p.unmarshalSingularValue(v, fd)
+ }
+}
+
+func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return v, tok.err
+ }
+ if tok.value == "" {
+ return v, p.errorf("unexpected EOF")
+ }
+
+ switch fd.Kind() {
+ case protoreflect.BoolKind:
+ switch tok.value {
+ case "true", "1", "t", "True":
+ return protoreflect.ValueOfBool(true), nil
+ case "false", "0", "f", "False":
+ return protoreflect.ValueOfBool(false), nil
+ }
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ return protoreflect.ValueOfInt32(int32(x)), nil
+ }
+
+		// The C++ parser accepts large positive hex numbers that use
+ // two's complement arithmetic to represent negative numbers.
+ // This feature is here for backwards compatibility with C++.
+ if strings.HasPrefix(tok.value, "0x") {
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil
+ }
+ }
+ case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+ if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+ return protoreflect.ValueOfInt64(int64(x)), nil
+ }
+
+		// The C++ parser accepts large positive hex numbers that use
+ // two's complement arithmetic to represent negative numbers.
+ // This feature is here for backwards compatibility with C++.
+ if strings.HasPrefix(tok.value, "0x") {
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil
+ }
+ }
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ return protoreflect.ValueOfUint32(uint32(x)), nil
+ }
+ case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ return protoreflect.ValueOfUint64(uint64(x)), nil
+ }
+ case protoreflect.FloatKind:
+ // Ignore 'f' for compatibility with output generated by C++,
+ // but don't remove 'f' when the value is "-inf" or "inf".
+ v := tok.value
+ if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
+ v = v[:len(v)-len("f")]
+ }
+ if x, err := strconv.ParseFloat(v, 32); err == nil {
+ return protoreflect.ValueOfFloat32(float32(x)), nil
+ }
+ case protoreflect.DoubleKind:
+ // Ignore 'f' for compatibility with output generated by C++,
+ // but don't remove 'f' when the value is "-inf" or "inf".
+ v := tok.value
+ if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
+ v = v[:len(v)-len("f")]
+ }
+ if x, err := strconv.ParseFloat(v, 64); err == nil {
+ return protoreflect.ValueOfFloat64(float64(x)), nil
+ }
+ case protoreflect.StringKind:
+ if isQuote(tok.value[0]) {
+ return protoreflect.ValueOfString(tok.unquoted), nil
+ }
+ case protoreflect.BytesKind:
+ if isQuote(tok.value[0]) {
+ return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil
+ }
+ case protoreflect.EnumKind:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil
+ }
+ vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value))
+ if vd != nil {
+ return protoreflect.ValueOfEnum(vd.Number()), nil
+ }
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ var terminator string
+ switch tok.value {
+ case "{":
+ terminator = "}"
+ case "<":
+ terminator = ">"
+ default:
+ return v, p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ err := p.unmarshalMessage(v.Message(), terminator)
+ return v, err
+ default:
+ panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
+ }
+ return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value)
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ":" {
+ if fd.Message() == nil {
+ return p.errorf("expected ':', found %q", tok.value)
+ }
+ p.back()
+ }
+ return nil
+}
+
+// consumeExtensionOrAnyName consumes an extension name or an Any type URL and
+// the following ']'. It returns the name or URL consumed.
+func (p *textParser) consumeExtensionOrAnyName() (string, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return "", tok.err
+ }
+
+ // If extension name or type url is quoted, it's a single token.
+ if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+ name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+ if err != nil {
+ return "", err
+ }
+ return name, p.consumeToken("]")
+ }
+
+ // Consume everything up to "]"
+ var parts []string
+ for tok.value != "]" {
+ parts = append(parts, tok.value)
+ tok = p.next()
+ if tok.err != nil {
+ return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+ }
+ if p.done && tok.value != "]" {
+ return "", p.errorf("unclosed type_url or extension name")
+ }
+ }
+ return strings.Join(parts, ""), nil
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in unmarshalMessage to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ";" && tok.value != "," {
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+ pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+ p.cur.err = pe
+ p.done = true
+ return pe
+}
+
+func (p *textParser) skipWhitespace() {
+ i := 0
+ for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+ if p.s[i] == '#' {
+ // comment; skip to end of line or input
+ for i < len(p.s) && p.s[i] != '\n' {
+ i++
+ }
+ if i == len(p.s) {
+ break
+ }
+ }
+ if p.s[i] == '\n' {
+ p.line++
+ }
+ i++
+ }
+ p.offset += i
+ p.s = p.s[i:len(p.s)]
+ if len(p.s) == 0 {
+ p.done = true
+ }
+}
+
+func (p *textParser) advance() {
+ // Skip whitespace
+ p.skipWhitespace()
+ if p.done {
+ return
+ }
+
+ // Start of non-whitespace
+ p.cur.err = nil
+ p.cur.offset, p.cur.line = p.offset, p.line
+ p.cur.unquoted = ""
+ switch p.s[0] {
+ case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
+ // Single symbol
+ p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+ case '"', '\'':
+ // Quoted string
+ i := 1
+ for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+ if p.s[i] == '\\' && i+1 < len(p.s) {
+ // skip escaped char
+ i++
+ }
+ i++
+ }
+ if i >= len(p.s) || p.s[i] != p.s[0] {
+ p.errorf("unmatched quote")
+ return
+ }
+ unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+ if err != nil {
+ p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+ return
+ }
+ p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+ p.cur.unquoted = unq
+ default:
+ i := 0
+ for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+ i++
+ }
+ if i == 0 {
+ p.errorf("unexpected byte %#x", p.s[0])
+ return
+ }
+ p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+ }
+ p.offset += len(p.cur.value)
+}
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+ if p.backed || p.done {
+ p.backed = false
+ return &p.cur
+ }
+ p.advance()
+ if p.done {
+ p.cur.value = ""
+ } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+ // Look for multiple quoted strings separated by whitespace,
+ // and concatenate them.
+ cat := p.cur
+ for {
+ p.skipWhitespace()
+ if p.done || !isQuote(p.s[0]) {
+ break
+ }
+ p.advance()
+ if p.cur.err != nil {
+ return &p.cur
+ }
+ cat.value += " " + p.cur.value
+ cat.unquoted += p.cur.unquoted
+ }
+ p.done = false // parser may have seen EOF, but we want to return cat
+ p.cur = cat
+ }
+ return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != s {
+ p.back()
+ return p.errorf("expected %q, found %q", s, tok.value)
+ }
+ return nil
+}
+
+var errBadUTF8 = errors.New("proto: bad UTF-8")
+
+func unquoteC(s string, quote rune) (string, error) {
+ // This is based on C++'s tokenizer.cc.
+ // Despite its name, this is *not* parsing C syntax.
+ // For instance, "\0" is an invalid quoted string.
+
+ // Avoid allocation in trivial cases.
+ simple := true
+ for _, r := range s {
+ if r == '\\' || r == quote {
+ simple = false
+ break
+ }
+ }
+ if simple {
+ return s, nil
+ }
+
+ buf := make([]byte, 0, 3*len(s)/2)
+ for len(s) > 0 {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", errBadUTF8
+ }
+ s = s[n:]
+ if r != '\\' {
+ if r < utf8.RuneSelf {
+ buf = append(buf, byte(r))
+ } else {
+ buf = append(buf, string(r)...)
+ }
+ continue
+ }
+
+ ch, tail, err := unescape(s)
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, ch...)
+ s = tail
+ }
+ return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", "", errBadUTF8
+ }
+ s = s[n:]
+ switch r {
+ case 'a':
+ return "\a", s, nil
+ case 'b':
+ return "\b", s, nil
+ case 'f':
+ return "\f", s, nil
+ case 'n':
+ return "\n", s, nil
+ case 'r':
+ return "\r", s, nil
+ case 't':
+ return "\t", s, nil
+ case 'v':
+ return "\v", s, nil
+ case '?':
+ return "?", s, nil // trigraph workaround
+ case '\'', '"', '\\':
+ return string(r), s, nil
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ if len(s) < 2 {
+ return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+ }
+ ss := string(r) + s[:2]
+ s = s[2:]
+ i, err := strconv.ParseUint(ss, 8, 8)
+ if err != nil {
+ return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
+ }
+ return string([]byte{byte(i)}), s, nil
+ case 'x', 'X', 'u', 'U':
+ var n int
+ switch r {
+ case 'x', 'X':
+ n = 2
+ case 'u':
+ n = 4
+ case 'U':
+ n = 8
+ }
+ if len(s) < n {
+ return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
+ }
+ ss := s[:n]
+ s = s[n:]
+ i, err := strconv.ParseUint(ss, 16, 64)
+ if err != nil {
+ return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
+ }
+ if r == 'x' || r == 'X' {
+ return string([]byte{byte(i)}), s, nil
+ }
+ if i > utf8.MaxRune {
+ return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
+ }
+ return string(rune(i)), s, nil
+ }
+ return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
+
+func isIdentOrNumberChar(c byte) bool {
+ switch {
+ case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+ return true
+ case '0' <= c && c <= '9':
+ return true
+ }
+ switch c {
+ case '-', '+', '.', '_':
+ return true
+ }
+ return false
+}
+
+func isWhitespace(c byte) bool {
+ switch c {
+ case ' ', '\t', '\n', '\r':
+ return true
+ }
+ return false
+}
+
+func isQuote(c byte) bool {
+ switch c {
+ case '"', '\'':
+ return true
+ }
+ return false
+}
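A minimal usage sketch for the legacy text parser above, again assuming durationpb is available; the input string is only an example:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb" // assumed available
)

func main() {
	d := &durationpb.Duration{}
	// The parser accepts "name: value" pairs separated by whitespace and
	// optional commas or semicolons.
	if err := proto.UnmarshalText("seconds: 3 nanos: 500", d); err != nil {
		panic(err)
	}
	fmt.Println(d.GetSeconds(), d.GetNanos()) // 3 500
}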
diff --git a/vendor/github.com/golang/protobuf/proto/text_encode.go b/vendor/github.com/golang/protobuf/proto/text_encode.go
new file mode 100644
index 00000000..a31134ee
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_encode.go
@@ -0,0 +1,560 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "bytes"
+ "encoding"
+ "fmt"
+ "io"
+ "math"
+ "sort"
+ "strings"
+
+ "google.golang.org/protobuf/encoding/prototext"
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+const wrapTextMarshalV2 = false
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+ Compact bool // use compact text format (one line)
+ ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes the proto text format of m to w.
+func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error {
+ b, err := tm.marshal(m)
+ if len(b) > 0 {
+ if _, err := w.Write(b); err != nil {
+ return err
+ }
+ }
+ return err
+}
+
+// Text returns a proto text formatted string of m.
+func (tm *TextMarshaler) Text(m Message) string {
+ b, _ := tm.marshal(m)
+ return string(b)
+}
+
+func (tm *TextMarshaler) marshal(m Message) ([]byte, error) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
+ return []byte(""), nil
+ }
+
+ if wrapTextMarshalV2 {
+ if m, ok := m.(encoding.TextMarshaler); ok {
+ return m.MarshalText()
+ }
+
+ opts := prototext.MarshalOptions{
+ AllowPartial: true,
+ EmitUnknown: true,
+ }
+ if !tm.Compact {
+ opts.Indent = " "
+ }
+ if !tm.ExpandAny {
+ opts.Resolver = (*protoregistry.Types)(nil)
+ }
+ return opts.Marshal(mr.Interface())
+ } else {
+ w := &textWriter{
+ compact: tm.Compact,
+ expandAny: tm.ExpandAny,
+ complete: true,
+ }
+
+ if m, ok := m.(encoding.TextMarshaler); ok {
+ b, err := m.MarshalText()
+ if err != nil {
+ return nil, err
+ }
+ w.Write(b)
+ return w.buf, nil
+ }
+
+ err := w.writeMessage(mr)
+ return w.buf, err
+ }
+}
+
+var (
+ defaultTextMarshaler = TextMarshaler{}
+ compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// MarshalText writes the proto text format of m to w.
+func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) }
+
+// MarshalTextString returns a proto text formatted string of m.
+func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) }
+
+// CompactText writes the compact proto text format of m to w.
+func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) }
+
+// CompactTextString returns a compact proto text formatted string of m.
+func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) }
+
+var (
+ newline = []byte("\n")
+ endBraceNewline = []byte("}\n")
+ posInf = []byte("inf")
+ negInf = []byte("-inf")
+ nan = []byte("nan")
+)
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+ compact bool // same as TextMarshaler.Compact
+ expandAny bool // same as TextMarshaler.ExpandAny
+ complete bool // whether the current position is a complete line
+ indent int // indentation level; never negative
+ buf []byte
+}
+
+func (w *textWriter) Write(p []byte) (n int, _ error) {
+ newlines := bytes.Count(p, newline)
+ if newlines == 0 {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.buf = append(w.buf, p...)
+ w.complete = false
+ return len(p), nil
+ }
+
+ frags := bytes.SplitN(p, newline, newlines+1)
+ if w.compact {
+ for i, frag := range frags {
+ if i > 0 {
+ w.buf = append(w.buf, ' ')
+ n++
+ }
+ w.buf = append(w.buf, frag...)
+ n += len(frag)
+ }
+ return n, nil
+ }
+
+ for i, frag := range frags {
+ if w.complete {
+ w.writeIndent()
+ }
+ w.buf = append(w.buf, frag...)
+ n += len(frag)
+ if i+1 < len(frags) {
+ w.buf = append(w.buf, '\n')
+ n++
+ }
+ }
+ w.complete = len(frags[len(frags)-1]) == 0
+ return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+ if w.compact && c == '\n' {
+ c = ' '
+ }
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.buf = append(w.buf, c)
+ w.complete = c == '\n'
+ return nil
+}
+
+func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.complete = false
+
+ if fd.Kind() != protoreflect.GroupKind {
+ w.buf = append(w.buf, fd.Name()...)
+ w.WriteByte(':')
+ } else {
+ // Use message type name for group field name.
+ w.buf = append(w.buf, fd.Message().Name()...)
+ }
+
+ if !w.compact {
+ w.WriteByte(' ')
+ }
+}
+
+func requiresQuotes(u string) bool {
+	// When type URL contains any characters except [0-9A-Za-z._/], it must be quoted.
+ for _, ch := range u {
+ switch {
+ case ch == '.' || ch == '/' || ch == '_':
+ continue
+ case '0' <= ch && ch <= '9':
+ continue
+ case 'A' <= ch && ch <= 'Z':
+ continue
+ case 'a' <= ch && ch <= 'z':
+ continue
+ default:
+ return true
+ }
+ }
+ return false
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if the value can't be unmarshaled (e.g. because
+// required messages are not linked in).
+//
+// It returns (true, error) when the value was written in expanded format or an error
+// was encountered.
+func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) {
+ md := m.Descriptor()
+ fdURL := md.Fields().ByName("type_url")
+ fdVal := md.Fields().ByName("value")
+
+ url := m.Get(fdURL).String()
+ mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
+ if err != nil {
+ return false, nil
+ }
+
+ b := m.Get(fdVal).Bytes()
+ m2 := mt.New()
+ if err := proto.Unmarshal(b, m2.Interface()); err != nil {
+ return false, nil
+ }
+ w.Write([]byte("["))
+ if requiresQuotes(url) {
+ w.writeQuotedString(url)
+ } else {
+ w.Write([]byte(url))
+ }
+ if w.compact {
+ w.Write([]byte("]:<"))
+ } else {
+ w.Write([]byte("]: <\n"))
+ w.indent++
+ }
+ if err := w.writeMessage(m2); err != nil {
+ return true, err
+ }
+ if w.compact {
+ w.Write([]byte("> "))
+ } else {
+ w.indent--
+ w.Write([]byte(">\n"))
+ }
+ return true, nil
+}
+
+func (w *textWriter) writeMessage(m protoreflect.Message) error {
+ md := m.Descriptor()
+ if w.expandAny && md.FullName() == "google.protobuf.Any" {
+ if canExpand, err := w.writeProto3Any(m); canExpand {
+ return err
+ }
+ }
+
+ fds := md.Fields()
+ for i := 0; i < fds.Len(); {
+ fd := fds.Get(i)
+ if od := fd.ContainingOneof(); od != nil {
+ fd = m.WhichOneof(od)
+ i += od.Fields().Len()
+ } else {
+ i++
+ }
+ if fd == nil || !m.Has(fd) {
+ continue
+ }
+
+ switch {
+ case fd.IsList():
+ lv := m.Get(fd).List()
+ for j := 0; j < lv.Len(); j++ {
+ w.writeName(fd)
+ v := lv.Get(j)
+ if err := w.writeSingularValue(v, fd); err != nil {
+ return err
+ }
+ w.WriteByte('\n')
+ }
+ case fd.IsMap():
+ kfd := fd.MapKey()
+ vfd := fd.MapValue()
+ mv := m.Get(fd).Map()
+
+ type entry struct{ key, val protoreflect.Value }
+ var entries []entry
+ mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
+ entries = append(entries, entry{k.Value(), v})
+ return true
+ })
+ sort.Slice(entries, func(i, j int) bool {
+ switch kfd.Kind() {
+ case protoreflect.BoolKind:
+ return !entries[i].key.Bool() && entries[j].key.Bool()
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+ return entries[i].key.Int() < entries[j].key.Int()
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+ return entries[i].key.Uint() < entries[j].key.Uint()
+ case protoreflect.StringKind:
+ return entries[i].key.String() < entries[j].key.String()
+ default:
+ panic("invalid kind")
+ }
+ })
+ for _, entry := range entries {
+ w.writeName(fd)
+ w.WriteByte('<')
+ if !w.compact {
+ w.WriteByte('\n')
+ }
+ w.indent++
+ w.writeName(kfd)
+ if err := w.writeSingularValue(entry.key, kfd); err != nil {
+ return err
+ }
+ w.WriteByte('\n')
+ w.writeName(vfd)
+ if err := w.writeSingularValue(entry.val, vfd); err != nil {
+ return err
+ }
+ w.WriteByte('\n')
+ w.indent--
+ w.WriteByte('>')
+ w.WriteByte('\n')
+ }
+ default:
+ w.writeName(fd)
+ if err := w.writeSingularValue(m.Get(fd), fd); err != nil {
+ return err
+ }
+ w.WriteByte('\n')
+ }
+ }
+
+ if b := m.GetUnknown(); len(b) > 0 {
+ w.writeUnknownFields(b)
+ }
+ return w.writeExtensions(m)
+}
+
+func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
+ switch fd.Kind() {
+ case protoreflect.FloatKind, protoreflect.DoubleKind:
+ switch vf := v.Float(); {
+ case math.IsInf(vf, +1):
+ w.Write(posInf)
+ case math.IsInf(vf, -1):
+ w.Write(negInf)
+ case math.IsNaN(vf):
+ w.Write(nan)
+ default:
+ fmt.Fprint(w, v.Interface())
+ }
+ case protoreflect.StringKind:
+ // NOTE: This does not validate UTF-8 for historical reasons.
+ w.writeQuotedString(string(v.String()))
+ case protoreflect.BytesKind:
+ w.writeQuotedString(string(v.Bytes()))
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ var bra, ket byte = '<', '>'
+ if fd.Kind() == protoreflect.GroupKind {
+ bra, ket = '{', '}'
+ }
+ w.WriteByte(bra)
+ if !w.compact {
+ w.WriteByte('\n')
+ }
+ w.indent++
+ m := v.Message()
+ if m2, ok := m.Interface().(encoding.TextMarshaler); ok {
+ b, err := m2.MarshalText()
+ if err != nil {
+ return err
+ }
+ w.Write(b)
+ } else {
+ w.writeMessage(m)
+ }
+ w.indent--
+ w.WriteByte(ket)
+ case protoreflect.EnumKind:
+ if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil {
+ fmt.Fprint(w, ev.Name())
+ } else {
+ fmt.Fprint(w, v.Enum())
+ }
+ default:
+ fmt.Fprint(w, v.Interface())
+ }
+ return nil
+}
+
+// writeQuotedString writes a quoted string in the protocol buffer text format.
+func (w *textWriter) writeQuotedString(s string) {
+ w.WriteByte('"')
+ for i := 0; i < len(s); i++ {
+ switch c := s[i]; c {
+ case '\n':
+ w.buf = append(w.buf, `\n`...)
+ case '\r':
+ w.buf = append(w.buf, `\r`...)
+ case '\t':
+ w.buf = append(w.buf, `\t`...)
+ case '"':
+ w.buf = append(w.buf, `\"`...)
+ case '\\':
+ w.buf = append(w.buf, `\\`...)
+ default:
+ if isPrint := c >= 0x20 && c < 0x7f; isPrint {
+ w.buf = append(w.buf, c)
+ } else {
+ w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...)
+ }
+ }
+ }
+ w.WriteByte('"')
+}
+
+func (w *textWriter) writeUnknownFields(b []byte) {
+ if !w.compact {
+ fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b))
+ }
+
+ for len(b) > 0 {
+ num, wtyp, n := protowire.ConsumeTag(b)
+ if n < 0 {
+ return
+ }
+ b = b[n:]
+
+ if wtyp == protowire.EndGroupType {
+ w.indent--
+ w.Write(endBraceNewline)
+ continue
+ }
+ fmt.Fprint(w, num)
+ if wtyp != protowire.StartGroupType {
+ w.WriteByte(':')
+ }
+ if !w.compact || wtyp == protowire.StartGroupType {
+ w.WriteByte(' ')
+ }
+ switch wtyp {
+ case protowire.VarintType:
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return
+ }
+ b = b[n:]
+ fmt.Fprint(w, v)
+ case protowire.Fixed32Type:
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return
+ }
+ b = b[n:]
+ fmt.Fprint(w, v)
+ case protowire.Fixed64Type:
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return
+ }
+ b = b[n:]
+ fmt.Fprint(w, v)
+ case protowire.BytesType:
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return
+ }
+ b = b[n:]
+ fmt.Fprintf(w, "%q", v)
+ case protowire.StartGroupType:
+ w.WriteByte('{')
+ w.indent++
+ default:
+ fmt.Fprintf(w, "/* unknown wire type %d */", wtyp)
+ }
+ w.WriteByte('\n')
+ }
+}
+
+// writeExtensions writes all the extensions in m.
+func (w *textWriter) writeExtensions(m protoreflect.Message) error {
+ md := m.Descriptor()
+ if md.ExtensionRanges().Len() == 0 {
+ return nil
+ }
+
+ type ext struct {
+ desc protoreflect.FieldDescriptor
+ val protoreflect.Value
+ }
+ var exts []ext
+ m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ if fd.IsExtension() {
+ exts = append(exts, ext{fd, v})
+ }
+ return true
+ })
+ sort.Slice(exts, func(i, j int) bool {
+ return exts[i].desc.Number() < exts[j].desc.Number()
+ })
+
+ for _, ext := range exts {
+ // For message set, use the name of the message as the extension name.
+ name := string(ext.desc.FullName())
+ if isMessageSet(ext.desc.ContainingMessage()) {
+ name = strings.TrimSuffix(name, ".message_set_extension")
+ }
+
+ if !ext.desc.IsList() {
+ if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil {
+ return err
+ }
+ } else {
+ lv := ext.val.List()
+ for i := 0; i < lv.Len(); i++ {
+ if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
+ fmt.Fprintf(w, "[%s]:", name)
+ if !w.compact {
+ w.WriteByte(' ')
+ }
+ if err := w.writeSingularValue(v, fd); err != nil {
+ return err
+ }
+ w.WriteByte('\n')
+ return nil
+}
+
+func (w *textWriter) writeIndent() {
+ if !w.complete {
+ return
+ }
+ for i := 0; i < w.indent*2; i++ {
+ w.buf = append(w.buf, ' ')
+ }
+ w.complete = false
+}
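And the matching encoder side: a sketch of the default and compact text marshalers defined above (durationpb assumed available):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb" // assumed available
)

func main() {
	d := &durationpb.Duration{Seconds: 3, Nanos: 500}

	// Multi-line form, two spaces of indentation per nesting level.
	fmt.Print(proto.MarshalTextString(d))

	// Single-line form produced by the compact marshaler,
	// e.g. "seconds:3 nanos:500 ".
	fmt.Println(proto.CompactTextString(d))
}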
diff --git a/vendor/github.com/golang/protobuf/proto/wire.go b/vendor/github.com/golang/protobuf/proto/wire.go
new file mode 100644
index 00000000..d7c28da5
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/wire.go
@@ -0,0 +1,78 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ protoV2 "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// Size returns the size in bytes of the wire-format encoding of m.
+func Size(m Message) int {
+ if m == nil {
+ return 0
+ }
+ mi := MessageV2(m)
+ return protoV2.Size(mi)
+}
+
+// Marshal returns the wire-format encoding of m.
+func Marshal(m Message) ([]byte, error) {
+ b, err := marshalAppend(nil, m, false)
+ if b == nil {
+ b = zeroBytes
+ }
+ return b, err
+}
+
+var zeroBytes = make([]byte, 0, 0)
+
+func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) {
+ if m == nil {
+ return nil, ErrNil
+ }
+ mi := MessageV2(m)
+ nbuf, err := protoV2.MarshalOptions{
+ Deterministic: deterministic,
+ AllowPartial: true,
+ }.MarshalAppend(buf, mi)
+ if err != nil {
+ return buf, err
+ }
+ if len(buf) == len(nbuf) {
+ if !mi.ProtoReflect().IsValid() {
+ return buf, ErrNil
+ }
+ }
+ return nbuf, checkRequiredNotSet(mi)
+}
+
+// Unmarshal parses a wire-format message in b and places the decoded results in m.
+//
+// Unmarshal resets m before starting to unmarshal, so any existing data in m is always
+// removed. Use UnmarshalMerge to preserve and append to existing data.
+func Unmarshal(b []byte, m Message) error {
+ m.Reset()
+ return UnmarshalMerge(b, m)
+}
+
+// UnmarshalMerge parses a wire-format message in b and places the decoded results in m.
+func UnmarshalMerge(b []byte, m Message) error {
+ mi := MessageV2(m)
+ out, err := protoV2.UnmarshalOptions{
+ AllowPartial: true,
+ Merge: true,
+ }.UnmarshalState(protoiface.UnmarshalInput{
+ Buf: b,
+ Message: mi.ProtoReflect(),
+ })
+ if err != nil {
+ return err
+ }
+ if out.Flags&protoiface.UnmarshalInitialized > 0 {
+ return nil
+ }
+ return checkRequiredNotSet(mi)
+}
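A hedged round-trip sketch of the wire helpers above, assuming wrapperspb is available:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb" // assumed available
)

func main() {
	in := wrapperspb.String("hello")

	b, err := proto.Marshal(in) // wire-format bytes via the v2 runtime
	if err != nil {
		panic(err)
	}
	fmt.Println(len(b) == proto.Size(in)) // true

	out := &wrapperspb.StringValue{}
	if err := proto.Unmarshal(b, out); err != nil { // Reset, then merge
		panic(err)
	}
	fmt.Println(out.GetValue()) // hello
}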
diff --git a/vendor/github.com/golang/protobuf/proto/wrappers.go b/vendor/github.com/golang/protobuf/proto/wrappers.go
new file mode 100644
index 00000000..398e3485
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/wrappers.go
@@ -0,0 +1,34 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+// Bool stores v in a new bool value and returns a pointer to it.
+func Bool(v bool) *bool { return &v }
+
+// Int stores v in a new int32 value and returns a pointer to it.
+//
+// Deprecated: Use Int32 instead.
+func Int(v int) *int32 { return Int32(int32(v)) }
+
+// Int32 stores v in a new int32 value and returns a pointer to it.
+func Int32(v int32) *int32 { return &v }
+
+// Int64 stores v in a new int64 value and returns a pointer to it.
+func Int64(v int64) *int64 { return &v }
+
+// Uint32 stores v in a new uint32 value and returns a pointer to it.
+func Uint32(v uint32) *uint32 { return &v }
+
+// Uint64 stores v in a new uint64 value and returns a pointer to it.
+func Uint64(v uint64) *uint64 { return &v }
+
+// Float32 stores v in a new float32 value and returns a pointer to it.
+func Float32(v float32) *float32 { return &v }
+
+// Float64 stores v in a new float64 value and returns a pointer to it.
+func Float64(v float64) *float64 { return &v }
+
+// String stores v in a new string value and returns a pointer to it.
+func String(v string) *string { return &v }
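The pointer helpers above are mostly used to populate optional (pointer-typed) fields inline; a self-contained sketch with a hypothetical struct standing in for a proto2-style message:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// fooConfig is a hypothetical struct with pointer-typed fields, mirroring how
// optional scalars appear in generated proto2 messages.
type fooConfig struct {
	Enabled *bool
	Retries *int32
	Name    *string
}

func main() {
	cfg := fooConfig{
		Enabled: proto.Bool(true),
		Retries: proto.Int32(3),
		Name:    proto.String("example"),
	}
	fmt.Println(*cfg.Enabled, *cfg.Retries, *cfg.Name) // true 3 example
}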
diff --git a/vendor/github.com/google/go-github/v43/AUTHORS b/vendor/github.com/google/go-github/v43/AUTHORS
new file mode 100644
index 00000000..80bd26dd
--- /dev/null
+++ b/vendor/github.com/google/go-github/v43/AUTHORS
@@ -0,0 +1,357 @@
+# This is the official list of go-github authors for copyright purposes.
+#
+# This does not necessarily list everyone who has contributed code, since in
+# some cases, their employer may be the copyright holder. To see the full list
+# of contributors, see the revision history in source control or
+# https://github.com/google/go-github/graphs/contributors.
+#
+# Authors who wish to be recognized in this file should add themselves (or
+# their employer, as appropriate).
+
+178inaba
+2BFL
+413x
+Abhinav Gupta
+adrienzieba
+afdesk
+Ahmed Hagy
+Aidan Steele
+Ainsley Chong
+ajz01
+Akeda Bagus
+Akhil Mohan
+Alec Thomas
+Aleks Clark
+Alex Bramley
+Alex Orr
+Alex Unger
+Alexander Harkness
+Alexis Gauthiez
+Ali Farooq
+Allen Sun
+Amey Sakhadeo
+Anders Janmyr
+Andreas Garnæs
+Andrew Ryabchun
+Andy Grunwald
+Andy Hume
+Andy Lindeman
+angie pinilla
+anjanashenoy
+Anshuman Bhartiya
+Antoine
+Antoine Pelisse
+Anton Nguyen
+Anubha Kushwaha
+appilon
+aprp
+Aravind
+Arda Kuyumcu
+Arıl Bozoluk
+Asier Marruedo
+Austin Burdine
+Austin Dizzy
+Azuka Okuleye
+Ben Batha
+Benjamen Keroack
+Beshr Kayali
+Beyang Liu
+Billy Keyes
+Billy Lynch
+Björn Häuser
+boljen
+Brad Harris
+Brad Moylan
+Bradley Falzon
+Bradley McAllister
+Brandon Cook
+Brett Logan
+Brian Egizi
+Bryan Boreham
+Cami Diez
+Carl Johnson
+Carlos Alexandro Becker
+Carlos Tadeu Panato Junior
+chandresh-pancholi
+Charles Fenwick Elliott
+Charlie Yan
+Chmouel Boudjnah
+Chris King
+Chris Mc
+Chris Raborg
+Chris Roche
+Chris Schaefer
+chrisforrette
+Christian Muehlhaeuser
+Christoph Sassenberg
+Colin Misare
+Craig Peterson
+Cristian Maglie
+Daehyeok Mun
+Daniel Lanner
+Daniel Leavitt
+Daniel Nilsson
+Daoq
+Dave Du Cros
+Dave Henderson
+Dave Perrett
+Dave Protasowski
+David Deng
+David J. M. Karlsen
+David Jannotta
+David Ji
+David Lopez Reyes
+Davide Zipeto
+Dennis Webb
+Derek Jobst
+Dhi Aurrahman
+Diego Lapiduz
+Dmitri Shuralyov
+dmnlk
+Don Petersen
+Doug Turner
+Drew Fradette
+Dustin Deus
+Eivind
+Eli Uriegas
+Elliott Beach
+Emerson Wood
+eperm
+Erick Fejta
+Erik Nobel
+erwinvaneyk
+Evan Elias
+Fabian Holler
+Fabrice
+Felix Geisendörfer
+Filippo Valsorda
+Florian Forster
+Francesc Gil
+Francis
+Francisco Guimarães
+Fredrik Jönsson
+Garrett Squire
+George Kontridze
+Georgy Buranov
+Glen Mailer
+Gnahz
+Google Inc.
+Grachev Mikhail