diff --git a/vendor/git.rootprojects.org/root/go-gitver/.gitignore b/vendor/git.rootprojects.org/root/go-gitver/.gitignore new file mode 100644 index 0000000..bd0c823 --- /dev/null +++ b/vendor/git.rootprojects.org/root/go-gitver/.gitignore @@ -0,0 +1,17 @@ +xversion.go +zversion.go + +# ---> Go +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + diff --git a/vendor/git.rootprojects.org/root/go-gitver/LICENSE b/vendor/git.rootprojects.org/root/go-gitver/LICENSE new file mode 100644 index 0000000..09f2798 --- /dev/null +++ b/vendor/git.rootprojects.org/root/go-gitver/LICENSE @@ -0,0 +1,312 @@ +Mozilla Public License Version 2.0 + + 1. Definitions + +1.1. "Contributor" means each individual or legal entity that creates, contributes +to the creation of, or owns Covered Software. + +1.2. "Contributor Version" means the combination of the Contributions of others +(if any) used by a Contributor and that particular Contributor's Contribution. + + 1.3. "Contribution" means Covered Software of a particular Contributor. + +1.4. "Covered Software" means Source Code Form to which the initial Contributor +has attached the notice in Exhibit A, the Executable Form of such Source Code +Form, and Modifications of such Source Code Form, in each case including portions +thereof. + + 1.5. "Incompatible With Secondary Licenses" means + +(a) that the initial Contributor has attached the notice described in Exhibit +B to the Covered Software; or + +(b) that the Covered Software was made available under the terms of version +1.1 or earlier of the License, but not also under the terms of a Secondary +License. + +1.6. "Executable Form" means any form of the work other than Source Code Form. + +1.7. "Larger Work" means a work that combines Covered Software with other +material, in a separate file or files, that is not Covered Software. + + 1.8. "License" means this document. + +1.9. "Licensable" means having the right to grant, to the maximum extent possible, +whether at the time of the initial grant or subsequently, any and all of the +rights conveyed by this License. + + 1.10. "Modifications" means any of the following: + +(a) any file in Source Code Form that results from an addition to, deletion +from, or modification of the contents of Covered Software; or + +(b) any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor means any patent claim(s), including +without limitation, method, process, and apparatus claims, in any patent Licensable +by such Contributor that would be infringed, but for the grant of the License, +by the making, using, selling, offering for sale, having made, import, or +transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" means either the GNU General Public License, Version +2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General +Public License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") means an individual or a legal entity exercising rights +under this License. For legal entities, "You" includes any entity that controls, +is controlled by, or is under common control with You. 
For purposes of this +definition, "control" means (a) the power, direct or indirect, to cause the +direction or management of such entity, whether by contract or otherwise, +or (b) ownership of more than fifty percent (50%) of the outstanding shares +or beneficial ownership of such entity. + + 2. License Grants and Conditions + + 2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, non-exclusive +license: + +(a) under intellectual property rights (other than patent or trademark) Licensable +by such Contributor to use, reproduce, make available, modify, display, perform, +distribute, and otherwise exploit its Contributions, either on an unmodified +basis, with Modifications, or as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer for +sale, have made, import, and otherwise transfer either its Contributions or +its Contributor Version. + + 2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution become +effective for each Contribution on the date the Contributor first distributes +such Contribution. + + 2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under this +License. No additional rights or licenses will be implied from the distribution +or licensing of Covered Software under this License. Notwithstanding Section +2.1(b) above, no patent license is granted by a Contributor: + +(a) for any code that a Contributor has removed from Covered Software; or + +(b) for infringements caused by: (i) Your and any other third party's modifications +of Covered Software, or (ii) the combination of its Contributions with other +software (except as part of its Contributor Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of its +Contributions. + +This License does not grant any rights in the trademarks, service marks, or +logos of any Contributor (except as may be necessary to comply with the notice +requirements in Section 3.4). + + 2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to distribute +the Covered Software under a subsequent version of this License (see Section +10.2) or under the terms of a Secondary License (if permitted under the terms +of Section 3.3). + + 2.5. Representation + +Each Contributor represents that the Contributor believes its Contributions +are its original creation(s) or it has sufficient rights to grant the rights +to its Contributions conveyed by this License. + + 2.6. Fair Use + +This License is not intended to limit any rights You have under applicable +copyright doctrines of fair use, fair dealing, or other equivalents. + + 2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in +Section 2.1. + + 3. Responsibilities + + 3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any Modifications +that You create or to which You contribute, must be under the terms of this +License. You must inform recipients that the Source Code Form of the Covered +Software is governed by the terms of this License, and how they can obtain +a copy of this License. You may not attempt to alter or restrict the recipients' +rights in the Source Code Form. + + 3.2. 
Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code Form, +as described in Section 3.1, and You must inform recipients of the Executable +Form how they can obtain a copy of such Source Code Form by reasonable means +in a timely manner, at a charge no more than the cost of distribution to the +recipient; and + +(b) You may distribute such Executable Form under the terms of this License, +or sublicense it under different terms, provided that the license for the +Executable Form does not attempt to limit or alter the recipients' rights +in the Source Code Form under this License. + + 3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, provided +that You also comply with the requirements of this License for the Covered +Software. If the Larger Work is a combination of Covered Software with a work +governed by one or more Secondary Licenses, and the Covered Software is not +Incompatible With Secondary Licenses, this License permits You to additionally +distribute such Covered Software under the terms of such Secondary License(s), +so that the recipient of the Larger Work may, at their option, further distribute +the Covered Software under the terms of either this License or such Secondary +License(s). + + 3.4. Notices + +You may not remove or alter the substance of any license notices (including +copyright notices, patent notices, disclaimers of warranty, or limitations +of liability) contained within the Source Code Form of the Covered Software, +except that You may alter any license notices to the extent required to remedy +known factual inaccuracies. + + 3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, indemnity +or liability obligations to one or more recipients of Covered Software. However, +You may do so only on Your own behalf, and not on behalf of any Contributor. +You must make it absolutely clear that any such warranty, support, indemnity, +or liability obligation is offered by You alone, and You hereby agree to indemnify +every Contributor for any liability incurred by such Contributor as a result +of warranty, support, indemnity or liability terms You offer. You may include +additional disclaimers of warranty and limitations of liability specific to +any jurisdiction. + + 4. Inability to Comply Due to Statute or Regulation + +If it is impossible for You to comply with any of the terms of this License +with respect to some or all of the Covered Software due to statute, judicial +order, or regulation then You must: (a) comply with the terms of this License +to the maximum extent possible; and (b) describe the limitations and the code +they affect. Such description must be placed in a text file included with +all distributions of the Covered Software under this License. Except to the +extent prohibited by statute or regulation, such description must be sufficiently +detailed for a recipient of ordinary skill to be able to understand it. + + 5. Termination + +5.1. The rights granted under this License will terminate automatically if +You fail to comply with any of its terms. 
However, if You become compliant, +then the rights granted under this License from a particular Contributor are +reinstated (a) provisionally, unless and until such Contributor explicitly +and finally terminates Your grants, and (b) on an ongoing basis, if such Contributor +fails to notify You of the non-compliance by some reasonable means prior to +60 days after You have come back into compliance. Moreover, Your grants from +a particular Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the first +time You have received notice of non-compliance with this License from such +Contributor, and You become compliant prior to 30 days after Your receipt +of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent infringement +claim (excluding declaratory judgment actions, counter-claims, and cross-claims) +alleging that a Contributor Version directly or indirectly infringes any patent, +then the rights granted to You by any and all Contributors for the Covered +Software under Section 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end +user license agreements (excluding distributors and resellers) which have +been validly granted by You or Your distributors under this License prior +to termination shall survive termination. + + 6. Disclaimer of Warranty + +Covered Software is provided under this License on an "as is" basis, without +warranty of any kind, either expressed, implied, or statutory, including, +without limitation, warranties that the Covered Software is free of defects, +merchantable, fit for a particular purpose or non-infringing. The entire risk +as to the quality and performance of the Covered Software is with You. Should +any Covered Software prove defective in any respect, You (not any Contributor) +assume the cost of any necessary servicing, repair, or correction. This disclaimer +of warranty constitutes an essential part of this License. No use of any Covered +Software is authorized under this License except under this disclaimer. + + 7. Limitation of Liability + +Under no circumstances and under no legal theory, whether tort (including +negligence), contract, or otherwise, shall any Contributor, or anyone who +distributes Covered Software as permitted above, be liable to You for any +direct, indirect, special, incidental, or consequential damages of any character +including, without limitation, damages for lost profits, loss of goodwill, +work stoppage, computer failure or malfunction, or any and all other commercial +damages or losses, even if such party shall have been informed of the possibility +of such damages. This limitation of liability shall not apply to liability +for death or personal injury resulting from such party's negligence to the +extent applicable law prohibits such limitation. Some jurisdictions do not +allow the exclusion or limitation of incidental or consequential damages, +so this exclusion and limitation may not apply to You. + + 8. Litigation + +Any litigation relating to this License may be brought only in the courts +of a jurisdiction where the defendant maintains its principal place of business +and such litigation shall be governed by laws of that jurisdiction, without +reference to its conflict-of-law provisions. Nothing in this Section shall +prevent a party's ability to bring cross-claims or counter-claims. + + 9. 
Miscellaneous + +This License represents the complete agreement concerning the subject matter +hereof. If any provision of this License is held to be unenforceable, such +provision shall be reformed only to the extent necessary to make it enforceable. +Any law or regulation which provides that the language of a contract shall +be construed against the drafter shall not be used to construe this License +against a Contributor. + + 10. Versions of the License + + 10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section 10.3, +no one other than the license steward has the right to modify or publish new +versions of this License. Each version will be given a distinguishing version +number. + + 10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version of +the License under which You originally received the Covered Software, or under +the terms of any subsequent version published by the license steward. + + 10.3. Modified Versions + +If you create software not governed by this License, and you want to create +a new license for such software, you may create and use a modified version +of this License if you rename the license and remove any references to the +name of the license steward (except to note that such modified license differs +from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + +If You choose to distribute Source Code Form that is Incompatible With Secondary +Licenses under the terms of this version of the License, the notice described +in Exhibit B of this License must be attached. Exhibit A - Source Code Form +License Notice + +This Source Code Form is subject to the terms of the Mozilla Public License, +v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain +one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + +This Source Code Form is "Incompatible With Secondary Licenses", as defined +by the Mozilla Public License, v. 2.0. diff --git a/vendor/git.rootprojects.org/root/go-gitver/README.md b/vendor/git.rootprojects.org/root/go-gitver/README.md new file mode 100644 index 0000000..8dcd0a5 --- /dev/null +++ b/vendor/git.rootprojects.org/root/go-gitver/README.md @@ -0,0 +1,195 @@ +# git-version.go + +Use git tags to add semver to your go package. + +```txt +Goal: Either use an exact version like v1.0.0 + or translate the git version like v1.0.0-4-g0000000 + to a semver like v1.0.1-pre4+g0000000 + + Fail gracefully when git repo isn't available. +``` + +# Demo + +Generate an `xversion.go` file: + +```bash +go run git.rootprojects.org/root/go-gitver +cat xversion.go +``` + +**Note**: The file is named `xversion.go` by default so that the +generated file's `init()` will come later, and thus take priority, over +most other files. 
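Based on the `versionTpl` template embedded in `gitver.go` (included later in this diff), the generated file looks roughly like the sketch below. The revision, version, and timestamp values here are placeholders, and the file assumes `GitRev`, `GitVersion`, and `GitTimestamp` are already declared in the target package (as in the Usage section):

```go
// Code generated by go generate; DO NOT EDIT.

package main

func init() {
	// Placeholder values for illustration; the generator fills these in
	// from `git describe --tags --dirty --always` and `git rev-parse HEAD`.
	GitRev = "0000000000000000000000000000000000000000"
	GitVersion = "v1.0.1-pre4+g0000000"
	GitTimestamp = "2019-01-01T00:00:00Z"
}
```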
+ +See `go-gitver`s self-generated version: + +```bash +go run git.rootprojects.org/root/go-gitver version +``` + +# QuickStart + +Add this to the top of your main file: + +```go +//go:generate go run -mod=vendor git.rootprojects.org/root/go-gitver + +``` + +Add a file that imports go-gitver (for versioning) + +```go +// +build tools + +package example + +import _ "git.rootprojects.org/root/go-gitver" +``` + +Change you build instructions to be something like this: + +```bash +go mod vendor +go generate -mod=vendor ./... +go build -mod=vendor -o example cmd/example/*.go +``` + +You don't have to use `mod vendor`, but I highly recommend it. + +# Options + +```txt +version print version and exit +--fail exit with non-zero status code on failure +--package will set the package name +--outfile will replace `xversion.go` with the given file path +``` + +ENVs + +```bash +# Alias for --fail +GITVER_FAIL=true +``` + +For example: + +```go +//go:generate go run -mod=vendor git.rootprojects.org/root/go-gitver --fail + +``` + +```bash +go run -mod=vendor git.rootprojects.org/root/go-gitver version +``` + +# Usage + +See `examples/basic` + +1. Create a `tools` package in your project +2. Guard it against regular builds with `// +build tools` +3. Include `_ "git.rootprojects.org/root/go-gitver"` in the imports +4. Declare `var GitRev, GitVersion, GitTimestamp string` in your `package main` +5. Include `//go:generate go run -mod=vendor git.rootprojects.org/root/go-gitver` as well + +`tools/tools.go`: + +```go +// +build tools + +// This is a dummy package for build tooling +package tools + +import ( + _ "git.rootprojects.org/root/go-gitver" +) +``` + +`main.go`: + +```go +//go:generate go run git.rootprojects.org/root/go-gitver --fail + +package main + +import "fmt" + +var ( + GitRev = "0000000" + GitVersion = "v0.0.0-pre0+0000000" + GitTimestamp = "0000-00-00T00:00:00+0000" +) + +func main() { + fmt.Println(GitRev) + fmt.Println(GitVersion) + fmt.Println(GitTimestamp) +} +``` + +If you're using `go mod vendor` (which I highly recommend that you do), +you'd modify the `go:generate` ever so slightly: + +```go +//go:generate go run -mod=vendor git.rootprojects.org/root/go-gitver --fail +``` + +The only reason I didn't do that in the example is that I'd be included +the repository in itself and that would be... weird. + +# Why a tools package? + +> import "git.rootprojects.org/root/go-gitver" is a program, not an importable package + +Having a tools package with a build tag that you don't use is a nice way to add exact +versions of a command package used for tooling to your `go.mod` with `go mod tidy`, +without getting the error above. + +# git: behind the curtain + +These are the commands that are used under the hood to produce the versions. + +Shows the git tag + description. Assumes that you're using the semver format `v1.0.0` for your base tags. + +```bash +git describe --tags --dirty --always +# v1.0.0 +# v1.0.0-1-g0000000 +# v1.0.0-dirty +``` + +Show the commit date (when the commit made it into the current tree). +Internally we use the current date when the working tree is dirty. + +```bash +git show v1.0.0-1-g0000000 --format=%cd --date=format:%Y-%m-%dT%H:%M:%SZ%z --no-patch +# 2010-01-01T20:30:00Z-0600 +# fatal: ambiguous argument 'v1.0.0-1-g0000000-dirty': unknown revision or path not in the working tree. +``` + +Shows the most recent commit. + +```bash +git rev-parse HEAD +# 0000000000000000000000000000000000000000 +``` + +# Errors + +### cannot find package "." 
+ +```txt +package git.rootprojects.org/root/go-gitver: cannot find package "." in: + /Users/me/go-example/vendor/git.rootprojects.org/root/go-gitver +cmd/example/example.go:1: running "go": exit status 1 +``` + +You forgot to update deps and re-vendor: + +```bash +go mod tidy +go mod vendor +``` diff --git a/vendor/git.rootprojects.org/root/go-gitver/gitver.go b/vendor/git.rootprojects.org/root/go-gitver/gitver.go new file mode 100644 index 0000000..d3cf421 --- /dev/null +++ b/vendor/git.rootprojects.org/root/go-gitver/gitver.go @@ -0,0 +1,98 @@ +//go:generate go run -mod=vendor git.rootprojects.org/root/go-gitver + +package main + +import ( + "bytes" + "fmt" + "go/format" + "log" + "os" + "text/template" + "time" + + "git.rootprojects.org/root/go-gitver/gitver" +) + +var GitRev, GitVersion, GitTimestamp string +var exitCode int +var verFile = "xversion.go" + +func main() { + pkg := "main" + + args := os.Args[1:] + for i := range args { + arg := args[i] + if "-f" == arg || "--fail" == arg { + exitCode = 1 + } else if ("--outfile" == arg || "-o" == arg) && len(args) > i+1 { + verFile = args[i+1] + args[i+1] = "" + } else if "--package" == arg && len(args) > i+1 { + pkg = args[i+1] + args[i+1] = "" + } else if "-V" == arg || "version" == arg || "-version" == arg || "--version" == arg { + fmt.Println(GitRev) + fmt.Println(GitVersion) + fmt.Println(GitTimestamp) + os.Exit(0) + } + } + if "" != os.Getenv("GITVER_FAIL") && "false" != os.Getenv("GITVER_FAIL") { + exitCode = 1 + } + + v, err := gitver.ExecAndParse() + if nil != err { + log.Fatalf("Failed to get git version: %s\n", err) + os.Exit(exitCode) + } + + // Create or overwrite the go file from template + var buf bytes.Buffer + if err := versionTpl.Execute(&buf, struct { + Package string + Timestamp string + Version string + GitRev string + }{ + Package: pkg, + Timestamp: v.Timestamp.Format(time.RFC3339), + Version: v.Version, + GitRev: v.Rev, + }); nil != err { + panic(err) + } + + // Format + src, err := format.Source(buf.Bytes()) + if nil != err { + panic(err) + } + + // Write to disk (in the Current Working Directory) + f, err := os.Create(verFile) + if nil != err { + panic(err) + } + if _, err := f.Write(src); nil != err { + panic(err) + } + if err := f.Close(); nil != err { + panic(err) + } + +} + +var versionTpl = template.Must(template.New("").Parse(`// Code generated by go generate; DO NOT EDIT. 
+package {{ .Package }} + +func init() { + GitRev = "{{ .GitRev }}" + {{ if .Version -}} + GitVersion = "{{ .Version }}" + {{ end -}} + GitTimestamp = "{{ .Timestamp }}" +} +`)) diff --git a/vendor/git.rootprojects.org/root/go-gitver/gitver/gitver.go b/vendor/git.rootprojects.org/root/go-gitver/gitver/gitver.go new file mode 100644 index 0000000..fc04418 --- /dev/null +++ b/vendor/git.rootprojects.org/root/go-gitver/gitver/gitver.go @@ -0,0 +1,143 @@ +package gitver + +import ( + "fmt" + "os/exec" + "regexp" + "strconv" + "strings" + "time" +) + +var exactVer *regexp.Regexp +var gitVer *regexp.Regexp + +func init() { + // exactly vX.Y.Z (go-compatible semver) + exactVer = regexp.MustCompile(`^v\d+\.\d+\.\d+$`) + + // vX.Y.Z-n-g0000000 git post-release, semver prerelease + // vX.Y.Z-dirty git post-release, semver prerelease + gitVer = regexp.MustCompile(`^(v\d+\.\d+)\.(\d+)(-(\d+))?(-(g[0-9a-f]+))?(-(dirty))?`) +} + +type Versions struct { + Timestamp time.Time + Version string + Rev string +} + +func ExecAndParse() (*Versions, error) { + desc, err := gitDesc() + if nil != err { + return nil, err + } + rev, err := gitRev() + if nil != err { + return nil, err + } + ver, err := semVer(desc) + if nil != err { + return nil, err + } + ts, err := gitTimestamp(desc) + if nil != err { + ts = time.Now() + } + + return &Versions{ + Timestamp: ts, + Version: ver, + Rev: rev, + }, nil +} + +func gitDesc() (string, error) { + args := strings.Split("git describe --tags --dirty --always", " ") + cmd := exec.Command(args[0], args[1:]...) + out, err := cmd.CombinedOutput() + if nil != err { + // Don't panic, just carry on + //out = []byte("v0.0.0-0-g0000000") + return "", err + } + return strings.TrimSpace(string(out)), nil +} + +func gitRev() (string, error) { + args := strings.Split("git rev-parse HEAD", " ") + cmd := exec.Command(args[0], args[1:]...) + out, err := cmd.CombinedOutput() + if nil != err { + return "", fmt.Errorf("\nUnexpected Error\n\n"+ + "Please open an issue at https://git.rootprojects.org/root/go-gitver/issues/new \n"+ + "Please include the following:\n\n"+ + "Command: %s\n"+ + "Output: %s\n"+ + "Error: %s\n"+ + "\nPlease and Thank You.\n\n", strings.Join(args, " "), out, err) + } + return strings.TrimSpace(string(out)), nil +} + +func semVer(desc string) (string, error) { + if exactVer.MatchString(desc) { + // v1.0.0 + return desc, nil + } + + if !gitVer.MatchString(desc) { + return "", nil + } + + // (v1.0).(0)(-(1))(-(g0000000))(-(dirty)) + vers := gitVer.FindStringSubmatch(desc) + patch, err := strconv.Atoi(vers[2]) + if nil != err { + return "", fmt.Errorf("\nUnexpected Error\n\n"+ + "Please open an issue at https://git.rootprojects.org/root/go-gitver/issues/new \n"+ + "Please include the following:\n\n"+ + "git description: %s\n"+ + "RegExp: %#v\n"+ + "Error: %s\n"+ + "\nPlease and Thank You.\n\n", desc, gitVer, err) + } + + // v1.0.1-pre1 + // v1.0.1-pre1+g0000000 + // v1.0.1-pre0+dirty + // v1.0.1-pre0+g0000000-dirty + if "" == vers[4] { + vers[4] = "0" + } + ver := fmt.Sprintf("%s.%d-pre%s", vers[1], patch+1, vers[4]) + if "" != vers[6] || "dirty" == vers[8] { + ver += "+" + if "" != vers[6] { + ver += vers[6] + if "" != vers[8] { + ver += "-" + } + } + ver += vers[8] + } + + return ver, nil +} + +func gitTimestamp(desc string) (time.Time, error) { + args := []string{ + "git", + "show", desc, + "--format=%cd", + "--date=format:%Y-%m-%dT%H:%M:%SZ%z", + "--no-patch", + } + cmd := exec.Command(args[0], args[1:]...) 
+ out, err := cmd.CombinedOutput() + if nil != err { + // a dirty desc was probably used + return time.Time{}, err + } + return time.Parse(time.RFC3339, strings.TrimSpace(string(out))) +} diff --git a/vendor/git.rootprojects.org/root/go-gitver/go.mod b/vendor/git.rootprojects.org/root/go-gitver/go.mod new file mode 100644 index 0000000..af5f2ea --- /dev/null +++ b/vendor/git.rootprojects.org/root/go-gitver/go.mod @@ -0,0 +1,3 @@ +module git.rootprojects.org/root/go-gitver + +go 1.12 diff --git a/vendor/git.rootprojects.org/root/go-gitver/version.go b/vendor/git.rootprojects.org/root/go-gitver/version.go new file mode 100644 index 0000000..cd9e4d7 --- /dev/null +++ b/vendor/git.rootprojects.org/root/go-gitver/version.go @@ -0,0 +1,9 @@ +package main + +// use recently generated version info as a fallback +// for when git isn't present (i.e. go run ) +func init() { + GitRev = "0b8c2d86df4bfe32ff4534eec84cd56909c398e9" + GitVersion = "v1.1.1" + GitTimestamp = "2019-06-21T00:18:13-06:00" +} diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore new file mode 100644 index 0000000..0cd3800 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/.gitignore @@ -0,0 +1,5 @@ +TAGS +tags +.*.swp +tomlcheck/tomlcheck +toml.test diff --git a/vendor/github.com/BurntSushi/toml/.travis.yml b/vendor/github.com/BurntSushi/toml/.travis.yml new file mode 100644 index 0000000..8b8afc4 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/.travis.yml @@ -0,0 +1,15 @@ +language: go +go: + - 1.1 + - 1.2 + - 1.3 + - 1.4 + - 1.5 + - 1.6 + - tip +install: + - go install ./... + - go get github.com/BurntSushi/toml-test +script: + - export PATH="$PATH:$HOME/gopath/bin" + - make test diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE new file mode 100644 index 0000000..6efcfd0 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/COMPATIBLE @@ -0,0 +1,3 @@ +Compatible with TOML version +[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md) + diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING new file mode 100644 index 0000000..01b5743 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/COPYING @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 TOML authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/BurntSushi/toml/Makefile b/vendor/github.com/BurntSushi/toml/Makefile new file mode 100644 index 0000000..3600848 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/Makefile @@ -0,0 +1,19 @@ +install: + go install ./... + +test: install + go test -v + toml-test toml-test-decoder + toml-test -encoder toml-test-encoder + +fmt: + gofmt -w *.go */*.go + colcheck *.go */*.go + +tags: + find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS + +push: + git push origin master + git push github master + diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md new file mode 100644 index 0000000..7c1b37e --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/README.md @@ -0,0 +1,218 @@ +## TOML parser and encoder for Go with reflection + +TOML stands for Tom's Obvious, Minimal Language. This Go package provides a +reflection interface similar to Go's standard library `json` and `xml` +packages. This package also supports the `encoding.TextUnmarshaler` and +`encoding.TextMarshaler` interfaces so that you can define custom data +representations. (There is an example of this below.) + +Spec: https://github.com/toml-lang/toml + +Compatible with TOML version +[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md) + +Documentation: https://godoc.org/github.com/BurntSushi/toml + +Installation: + +```bash +go get github.com/BurntSushi/toml +``` + +Try the toml validator: + +```bash +go get github.com/BurntSushi/toml/cmd/tomlv +tomlv some-toml-file.toml +``` + +[![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml) + +### Testing + +This package passes all tests in +[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder +and the encoder. + +### Examples + +This package works similarly to how the Go standard library handles `XML` +and `JSON`. Namely, data is loaded into Go values via reflection. 
+ +For the simplest example, consider some TOML file as just a list of keys +and values: + +```toml +Age = 25 +Cats = [ "Cauchy", "Plato" ] +Pi = 3.14 +Perfection = [ 6, 28, 496, 8128 ] +DOB = 1987-07-05T05:45:00Z +``` + +Which could be defined in Go as: + +```go +type Config struct { + Age int + Cats []string + Pi float64 + Perfection []int + DOB time.Time // requires `import time` +} +``` + +And then decoded with: + +```go +var conf Config +if _, err := toml.Decode(tomlData, &conf); err != nil { + // handle error +} +``` + +You can also use struct tags if your struct field name doesn't map to a TOML +key value directly: + +```toml +some_key_NAME = "wat" +``` + +```go +type TOML struct { + ObscureKey string `toml:"some_key_NAME"` +} +``` + +### Using the `encoding.TextUnmarshaler` interface + +Here's an example that automatically parses duration strings into +`time.Duration` values: + +```toml +[[song]] +name = "Thunder Road" +duration = "4m49s" + +[[song]] +name = "Stairway to Heaven" +duration = "8m03s" +``` + +Which can be decoded with: + +```go +type song struct { + Name string + Duration duration +} +type songs struct { + Song []song +} +var favorites songs +if _, err := toml.Decode(blob, &favorites); err != nil { + log.Fatal(err) +} + +for _, s := range favorites.Song { + fmt.Printf("%s (%s)\n", s.Name, s.Duration) +} +``` + +And you'll also need a `duration` type that satisfies the +`encoding.TextUnmarshaler` interface: + +```go +type duration struct { + time.Duration +} + +func (d *duration) UnmarshalText(text []byte) error { + var err error + d.Duration, err = time.ParseDuration(string(text)) + return err +} +``` + +### More complex usage + +Here's an example of how to load the example from the official spec page: + +```toml +# This is a TOML document. Boom. + +title = "TOML Example" + +[owner] +name = "Tom Preston-Werner" +organization = "GitHub" +bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." +dob = 1979-05-27T07:32:00Z # First class dates? Why not? + +[database] +server = "192.168.1.1" +ports = [ 8001, 8001, 8002 ] +connection_max = 5000 +enabled = true + +[servers] + + # You can indent as you please. Tabs or spaces. TOML don't care. + [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + +[clients] +data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it + +# Line breaks are OK when inside arrays +hosts = [ + "alpha", + "omega" +] +``` + +And the corresponding Go types are: + +```go +type tomlConfig struct { + Title string + Owner ownerInfo + DB database `toml:"database"` + Servers map[string]server + Clients clients +} + +type ownerInfo struct { + Name string + Org string `toml:"organization"` + Bio string + DOB time.Time +} + +type database struct { + Server string + Ports []int + ConnMax int `toml:"connection_max"` + Enabled bool +} + +type server struct { + IP string + DC string +} + +type clients struct { + Data [][]interface{} + Hosts []string +} +``` + +Note that a case insensitive match will be tried if an exact match can't be +found. + +A working example of the above can be found in `_examples/example.{go,toml}`. 
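The examples above cover decoding; the vendored package also provides an encoder (see `encode.go` below). The following is a minimal sketch, assuming the `Encoder` API exactly as vendored in this diff; the `config` type and its values are made up for illustration:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

type config struct {
	Title string
	Ports []int
}

func main() {
	// NewEncoder writes TOML to the given io.Writer; Indent defaults to
	// two spaces and can be changed before calling Encode.
	var buf bytes.Buffer
	enc := toml.NewEncoder(&buf)
	if err := enc.Encode(config{Title: "example", Ports: []int{8001, 8002}}); err != nil {
		log.Fatal(err)
	}
	fmt.Print(buf.String())
	// Expected output, roughly:
	//   Title = "example"
	//   Ports = [8001, 8002]
}
```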
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go new file mode 100644 index 0000000..b0fd51d --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -0,0 +1,509 @@ +package toml + +import ( + "fmt" + "io" + "io/ioutil" + "math" + "reflect" + "strings" + "time" +) + +func e(format string, args ...interface{}) error { + return fmt.Errorf("toml: "+format, args...) +} + +// Unmarshaler is the interface implemented by objects that can unmarshal a +// TOML description of themselves. +type Unmarshaler interface { + UnmarshalTOML(interface{}) error +} + +// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`. +func Unmarshal(p []byte, v interface{}) error { + _, err := Decode(string(p), v) + return err +} + +// Primitive is a TOML value that hasn't been decoded into a Go value. +// When using the various `Decode*` functions, the type `Primitive` may +// be given to any value, and its decoding will be delayed. +// +// A `Primitive` value can be decoded using the `PrimitiveDecode` function. +// +// The underlying representation of a `Primitive` value is subject to change. +// Do not rely on it. +// +// N.B. Primitive values are still parsed, so using them will only avoid +// the overhead of reflection. They can be useful when you don't know the +// exact type of TOML data until run time. +type Primitive struct { + undecoded interface{} + context Key +} + +// DEPRECATED! +// +// Use MetaData.PrimitiveDecode instead. +func PrimitiveDecode(primValue Primitive, v interface{}) error { + md := MetaData{decoded: make(map[string]bool)} + return md.unify(primValue.undecoded, rvalue(v)) +} + +// PrimitiveDecode is just like the other `Decode*` functions, except it +// decodes a TOML value that has already been parsed. Valid primitive values +// can *only* be obtained from values filled by the decoder functions, +// including this method. (i.e., `v` may contain more `Primitive` +// values.) +// +// Meta data for primitive values is included in the meta data returned by +// the `Decode*` functions with one exception: keys returned by the Undecoded +// method will only reflect keys that were decoded. Namely, any keys hidden +// behind a Primitive will be considered undecoded. Executing this method will +// update the undecoded keys in the meta data. (See the example.) +func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { + md.context = primValue.context + defer func() { md.context = nil }() + return md.unify(primValue.undecoded, rvalue(v)) +} + +// Decode will decode the contents of `data` in TOML format into a pointer +// `v`. +// +// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be +// used interchangeably.) +// +// TOML arrays of tables correspond to either a slice of structs or a slice +// of maps. +// +// TOML datetimes correspond to Go `time.Time` values. +// +// All other TOML types (float, string, int, bool and array) correspond +// to the obvious Go types. +// +// An exception to the above rules is if a type implements the +// encoding.TextUnmarshaler interface. In this case, any primitive TOML value +// (floats, strings, integers, booleans and datetimes) will be converted to +// a byte string and given to the value's UnmarshalText method. See the +// Unmarshaler example for a demonstration with time duration strings. +// +// Key mapping +// +// TOML keys can map to either keys in a Go map or field names in a Go +// struct. 
The special `toml` struct tag may be used to map TOML keys to +// struct fields that don't match the key name exactly. (See the example.) +// A case insensitive match to struct names will be tried if an exact match +// can't be found. +// +// The mapping between TOML values and Go values is loose. That is, there +// may exist TOML values that cannot be placed into your representation, and +// there may be parts of your representation that do not correspond to +// TOML values. This loose mapping can be made stricter by using the IsDefined +// and/or Undecoded methods on the MetaData returned. +// +// This decoder will not handle cyclic types. If a cyclic type is passed, +// `Decode` will not terminate. +func Decode(data string, v interface{}) (MetaData, error) { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v)) + } + if rv.IsNil() { + return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v)) + } + p, err := parse(data) + if err != nil { + return MetaData{}, err + } + md := MetaData{ + p.mapping, p.types, p.ordered, + make(map[string]bool, len(p.ordered)), nil, + } + return md, md.unify(p.mapping, indirect(rv)) +} + +// DecodeFile is just like Decode, except it will automatically read the +// contents of the file at `fpath` and decode it for you. +func DecodeFile(fpath string, v interface{}) (MetaData, error) { + bs, err := ioutil.ReadFile(fpath) + if err != nil { + return MetaData{}, err + } + return Decode(string(bs), v) +} + +// DecodeReader is just like Decode, except it will consume all bytes +// from the reader and decode it for you. +func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { + bs, err := ioutil.ReadAll(r) + if err != nil { + return MetaData{}, err + } + return Decode(string(bs), v) +} + +// unify performs a sort of type unification based on the structure of `rv`, +// which is the client representation. +// +// Any type mismatch produces an error. Finding a type that we don't know +// how to handle produces an unsupported type error. +func (md *MetaData) unify(data interface{}, rv reflect.Value) error { + + // Special case. Look for a `Primitive` value. + if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() { + // Save the undecoded data and the key context into the primitive + // value. + context := make(Key, len(md.context)) + copy(context, md.context) + rv.Set(reflect.ValueOf(Primitive{ + undecoded: data, + context: context, + })) + return nil + } + + // Special case. Unmarshaler Interface support. + if rv.CanAddr() { + if v, ok := rv.Addr().Interface().(Unmarshaler); ok { + return v.UnmarshalTOML(data) + } + } + + // Special case. Handle time.Time values specifically. + // TODO: Remove this code when we decide to drop support for Go 1.1. + // This isn't necessary in Go 1.2 because time.Time satisfies the encoding + // interfaces. + if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) { + return md.unifyDatetime(data, rv) + } + + // Special case. Look for a value satisfying the TextUnmarshaler interface. + if v, ok := rv.Interface().(TextUnmarshaler); ok { + return md.unifyText(data, v) + } + // BUG(burntsushi) + // The behavior here is incorrect whenever a Go type satisfies the + // encoding.TextUnmarshaler interface but also corresponds to a TOML + // hash or array. In particular, the unmarshaler should only be applied + // to primitive TOML values. 
But at this point, it will be applied to + // all kinds of values and produce an incorrect error whenever those values + // are hashes or arrays (including arrays of tables). + + k := rv.Kind() + + // laziness + if k >= reflect.Int && k <= reflect.Uint64 { + return md.unifyInt(data, rv) + } + switch k { + case reflect.Ptr: + elem := reflect.New(rv.Type().Elem()) + err := md.unify(data, reflect.Indirect(elem)) + if err != nil { + return err + } + rv.Set(elem) + return nil + case reflect.Struct: + return md.unifyStruct(data, rv) + case reflect.Map: + return md.unifyMap(data, rv) + case reflect.Array: + return md.unifyArray(data, rv) + case reflect.Slice: + return md.unifySlice(data, rv) + case reflect.String: + return md.unifyString(data, rv) + case reflect.Bool: + return md.unifyBool(data, rv) + case reflect.Interface: + // we only support empty interfaces. + if rv.NumMethod() > 0 { + return e("unsupported type %s", rv.Type()) + } + return md.unifyAnything(data, rv) + case reflect.Float32: + fallthrough + case reflect.Float64: + return md.unifyFloat64(data, rv) + } + return e("unsupported type %s", rv.Kind()) +} + +func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if mapping == nil { + return nil + } + return e("type mismatch for %s: expected table but found %T", + rv.Type().String(), mapping) + } + + for key, datum := range tmap { + var f *field + fields := cachedTypeFields(rv.Type()) + for i := range fields { + ff := &fields[i] + if ff.name == key { + f = ff + break + } + if f == nil && strings.EqualFold(ff.name, key) { + f = ff + } + } + if f != nil { + subv := rv + for _, i := range f.index { + subv = indirect(subv.Field(i)) + } + if isUnifiable(subv) { + md.decoded[md.context.add(key).String()] = true + md.context = append(md.context, key) + if err := md.unify(datum, subv); err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + } else if f.name != "" { + // Bad user! No soup for you! 
+ return e("cannot write unexported field %s.%s", + rv.Type().String(), f.name) + } + } + } + return nil +} + +func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if tmap == nil { + return nil + } + return badtype("map", mapping) + } + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + for k, v := range tmap { + md.decoded[md.context.add(k).String()] = true + md.context = append(md.context, k) + + rvkey := indirect(reflect.New(rv.Type().Key())) + rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) + if err := md.unify(v, rvval); err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + + rvkey.SetString(k) + rv.SetMapIndex(rvkey, rvval) + } + return nil +} + +func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return badtype("slice", data) + } + sliceLen := datav.Len() + if sliceLen != rv.Len() { + return e("expected array length %d; got TOML array of length %d", + rv.Len(), sliceLen) + } + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return badtype("slice", data) + } + n := datav.Len() + if rv.IsNil() || rv.Cap() < n { + rv.Set(reflect.MakeSlice(rv.Type(), n, n)) + } + rv.SetLen(n) + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { + sliceLen := data.Len() + for i := 0; i < sliceLen; i++ { + v := data.Index(i).Interface() + sliceval := indirect(rv.Index(i)) + if err := md.unify(v, sliceval); err != nil { + return err + } + } + return nil +} + +func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error { + if _, ok := data.(time.Time); ok { + rv.Set(reflect.ValueOf(data)) + return nil + } + return badtype("time.Time", data) +} + +func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { + if s, ok := data.(string); ok { + rv.SetString(s) + return nil + } + return badtype("string", data) +} + +func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { + if num, ok := data.(float64); ok { + switch rv.Kind() { + case reflect.Float32: + fallthrough + case reflect.Float64: + rv.SetFloat(num) + default: + panic("bug") + } + return nil + } + return badtype("float", data) +} + +func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { + if num, ok := data.(int64); ok { + if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 { + switch rv.Kind() { + case reflect.Int, reflect.Int64: + // No bounds checking necessary. + case reflect.Int8: + if num < math.MinInt8 || num > math.MaxInt8 { + return e("value %d is out of range for int8", num) + } + case reflect.Int16: + if num < math.MinInt16 || num > math.MaxInt16 { + return e("value %d is out of range for int16", num) + } + case reflect.Int32: + if num < math.MinInt32 || num > math.MaxInt32 { + return e("value %d is out of range for int32", num) + } + } + rv.SetInt(num) + } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 { + unum := uint64(num) + switch rv.Kind() { + case reflect.Uint, reflect.Uint64: + // No bounds checking necessary. 
+ case reflect.Uint8: + if num < 0 || unum > math.MaxUint8 { + return e("value %d is out of range for uint8", num) + } + case reflect.Uint16: + if num < 0 || unum > math.MaxUint16 { + return e("value %d is out of range for uint16", num) + } + case reflect.Uint32: + if num < 0 || unum > math.MaxUint32 { + return e("value %d is out of range for uint32", num) + } + } + rv.SetUint(unum) + } else { + panic("unreachable") + } + return nil + } + return badtype("integer", data) +} + +func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { + if b, ok := data.(bool); ok { + rv.SetBool(b) + return nil + } + return badtype("boolean", data) +} + +func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { + rv.Set(reflect.ValueOf(data)) + return nil +} + +func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error { + var s string + switch sdata := data.(type) { + case TextMarshaler: + text, err := sdata.MarshalText() + if err != nil { + return err + } + s = string(text) + case fmt.Stringer: + s = sdata.String() + case string: + s = sdata + case bool: + s = fmt.Sprintf("%v", sdata) + case int64: + s = fmt.Sprintf("%d", sdata) + case float64: + s = fmt.Sprintf("%f", sdata) + default: + return badtype("primitive (string-like)", data) + } + if err := v.UnmarshalText([]byte(s)); err != nil { + return err + } + return nil +} + +// rvalue returns a reflect.Value of `v`. All pointers are resolved. +func rvalue(v interface{}) reflect.Value { + return indirect(reflect.ValueOf(v)) +} + +// indirect returns the value pointed to by a pointer. +// Pointers are followed until the value is not a pointer. +// New values are allocated for each nil pointer. +// +// An exception to this rule is if the value satisfies an interface of +// interest to us (like encoding.TextUnmarshaler). +func indirect(v reflect.Value) reflect.Value { + if v.Kind() != reflect.Ptr { + if v.CanSet() { + pv := v.Addr() + if _, ok := pv.Interface().(TextUnmarshaler); ok { + return pv + } + } + return v + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + return indirect(reflect.Indirect(v)) +} + +func isUnifiable(rv reflect.Value) bool { + if rv.CanSet() { + return true + } + if _, ok := rv.Interface().(TextUnmarshaler); ok { + return true + } + return false +} + +func badtype(expected string, data interface{}) error { + return e("cannot load TOML value of type %T into a Go %s", data, expected) +} diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/decode_meta.go new file mode 100644 index 0000000..b9914a6 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode_meta.go @@ -0,0 +1,121 @@ +package toml + +import "strings" + +// MetaData allows access to meta information about TOML data that may not +// be inferrable via reflection. In particular, whether a key has been defined +// and the TOML type of a key. +type MetaData struct { + mapping map[string]interface{} + types map[string]tomlType + keys []Key + decoded map[string]bool + context Key // Used only during decoding. +} + +// IsDefined returns true if the key given exists in the TOML data. The key +// should be specified hierarchially. e.g., +// +// // access the TOML key 'a.b.c' +// IsDefined("a", "b", "c") +// +// IsDefined will return false if an empty key given. Keys are case sensitive. 
+func (md *MetaData) IsDefined(key ...string) bool { + if len(key) == 0 { + return false + } + + var hash map[string]interface{} + var ok bool + var hashOrVal interface{} = md.mapping + for _, k := range key { + if hash, ok = hashOrVal.(map[string]interface{}); !ok { + return false + } + if hashOrVal, ok = hash[k]; !ok { + return false + } + } + return true +} + +// Type returns a string representation of the type of the key specified. +// +// Type will return the empty string if given an empty key or a key that +// does not exist. Keys are case sensitive. +func (md *MetaData) Type(key ...string) string { + fullkey := strings.Join(key, ".") + if typ, ok := md.types[fullkey]; ok { + return typ.typeString() + } + return "" +} + +// Key is the type of any TOML key, including key groups. Use (MetaData).Keys +// to get values of this type. +type Key []string + +func (k Key) String() string { + return strings.Join(k, ".") +} + +func (k Key) maybeQuotedAll() string { + var ss []string + for i := range k { + ss = append(ss, k.maybeQuoted(i)) + } + return strings.Join(ss, ".") +} + +func (k Key) maybeQuoted(i int) string { + quote := false + for _, c := range k[i] { + if !isBareKeyChar(c) { + quote = true + break + } + } + if quote { + return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\"" + } + return k[i] +} + +func (k Key) add(piece string) Key { + newKey := make(Key, len(k)+1) + copy(newKey, k) + newKey[len(k)] = piece + return newKey +} + +// Keys returns a slice of every key in the TOML data, including key groups. +// Each key is itself a slice, where the first element is the top of the +// hierarchy and the last is the most specific. +// +// The list will have the same order as the keys appeared in the TOML data. +// +// All keys returned are non-empty. +func (md *MetaData) Keys() []Key { + return md.keys +} + +// Undecoded returns all keys that have not been decoded in the order in which +// they appear in the original TOML document. +// +// This includes keys that haven't been decoded because of a Primitive value. +// Once the Primitive value is decoded, the keys will be considered decoded. +// +// Also note that decoding into an empty interface will result in no decoding, +// and so no keys will be considered decoded. +// +// In this sense, the Undecoded keys correspond to keys in the TOML document +// that do not have a concrete type in your representation. +func (md *MetaData) Undecoded() []Key { + undecoded := make([]Key, 0, len(md.keys)) + for _, key := range md.keys { + if !md.decoded[key.String()] { + undecoded = append(undecoded, key) + } + } + return undecoded +} diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go new file mode 100644 index 0000000..b371f39 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/doc.go @@ -0,0 +1,27 @@ +/* +Package toml provides facilities for decoding and encoding TOML configuration +files via reflection. There is also support for delaying decoding with +the Primitive type, and querying the set of keys in a TOML document with the +MetaData type. + +The specification implemented: https://github.com/toml-lang/toml + +The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify +whether a file is a valid TOML document. It can also be used to print the +type of each key in a TOML document. + +Testing + +There are two important types of tests used for this package. The first is +contained inside '*_test.go' files and uses the standard Go unit testing +framework. 
These tests are primarily devoted to holistically testing the +decoder and encoder. + +The second type of testing is used to verify the implementation's adherence +to the TOML specification. These tests have been factored into their own +project: https://github.com/BurntSushi/toml-test + +The reason the tests are in a separate project is so that they can be used by +any implementation of TOML. Namely, it is language agnostic. +*/ +package toml diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go new file mode 100644 index 0000000..d905c21 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/encode.go @@ -0,0 +1,568 @@ +package toml + +import ( + "bufio" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +type tomlEncodeError struct{ error } + +var ( + errArrayMixedElementTypes = errors.New( + "toml: cannot encode array with mixed element types") + errArrayNilElement = errors.New( + "toml: cannot encode array with nil element") + errNonString = errors.New( + "toml: cannot encode a map with non-string key type") + errAnonNonStruct = errors.New( + "toml: cannot encode an anonymous field that is not a struct") + errArrayNoTable = errors.New( + "toml: TOML array element cannot contain a table") + errNoKey = errors.New( + "toml: top-level values must be Go maps or structs") + errAnything = errors.New("") // used in testing +) + +var quotedReplacer = strings.NewReplacer( + "\t", "\\t", + "\n", "\\n", + "\r", "\\r", + "\"", "\\\"", + "\\", "\\\\", +) + +// Encoder controls the encoding of Go values to a TOML document to some +// io.Writer. +// +// The indentation level can be controlled with the Indent field. +type Encoder struct { + // A single indentation level. By default it is two spaces. + Indent string + + // hasWritten is whether we have written any output to w yet. + hasWritten bool + w *bufio.Writer +} + +// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer +// given. By default, a single indentation level is 2 spaces. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: bufio.NewWriter(w), + Indent: " ", + } +} + +// Encode writes a TOML representation of the Go value to the underlying +// io.Writer. If the value given cannot be encoded to a valid TOML document, +// then an error is returned. +// +// The mapping between Go values and TOML values should be precisely the same +// as for the Decode* functions. Similarly, the TextMarshaler interface is +// supported by encoding the resulting bytes as strings. (If you want to write +// arbitrary binary data then you will need to use something like base64 since +// TOML does not have any binary types.) +// +// When encoding TOML hashes (i.e., Go maps or structs), keys without any +// sub-hashes are encoded first. +// +// If a Go map is encoded, then its keys are sorted alphabetically for +// deterministic output. More control over this behavior may be provided if +// there is demand for it. +// +// Encoding Go values without a corresponding TOML representation---like map +// types with non-string keys---will cause an error to be returned. Similarly +// for mixed arrays/slices, arrays/slices with nil elements, embedded +// non-struct types and nested slices containing maps or structs. +// (e.g., [][]map[string]string is not allowed but []map[string]string is OK +// and so is []map[string][]string.) 
+func (enc *Encoder) Encode(v interface{}) error { + rv := eindirect(reflect.ValueOf(v)) + if err := enc.safeEncode(Key([]string{}), rv); err != nil { + return err + } + return enc.w.Flush() +} + +func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) { + defer func() { + if r := recover(); r != nil { + if terr, ok := r.(tomlEncodeError); ok { + err = terr.error + return + } + panic(r) + } + }() + enc.encode(key, rv) + return nil +} + +func (enc *Encoder) encode(key Key, rv reflect.Value) { + // Special case. Time needs to be in ISO8601 format. + // Special case. If we can marshal the type to text, then we used that. + // Basically, this prevents the encoder for handling these types as + // generic structs (or whatever the underlying type of a TextMarshaler is). + switch rv.Interface().(type) { + case time.Time, TextMarshaler: + enc.keyEqElement(key, rv) + return + } + + k := rv.Kind() + switch k { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64, + reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: + enc.keyEqElement(key, rv) + case reflect.Array, reflect.Slice: + if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) { + enc.eArrayOfTables(key, rv) + } else { + enc.keyEqElement(key, rv) + } + case reflect.Interface: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Map: + if rv.IsNil() { + return + } + enc.eTable(key, rv) + case reflect.Ptr: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Struct: + enc.eTable(key, rv) + default: + panic(e("unsupported type for key '%s': %s", key, k)) + } +} + +// eElement encodes any value that can be an array element (primitives and +// arrays). +func (enc *Encoder) eElement(rv reflect.Value) { + switch v := rv.Interface().(type) { + case time.Time: + // Special case time.Time as a primitive. Has to come before + // TextMarshaler below because time.Time implements + // encoding.TextMarshaler, but we need to always use UTC. + enc.wf(v.UTC().Format("2006-01-02T15:04:05Z")) + return + case TextMarshaler: + // Special case. Use text marshaler if it's available for this value. + if s, err := v.MarshalText(); err != nil { + encPanic(err) + } else { + enc.writeQuoted(string(s)) + } + return + } + switch rv.Kind() { + case reflect.Bool: + enc.wf(strconv.FormatBool(rv.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64: + enc.wf(strconv.FormatInt(rv.Int(), 10)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, + reflect.Uint32, reflect.Uint64: + enc.wf(strconv.FormatUint(rv.Uint(), 10)) + case reflect.Float32: + enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32))) + case reflect.Float64: + enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64))) + case reflect.Array, reflect.Slice: + enc.eArrayOrSliceElement(rv) + case reflect.Interface: + enc.eElement(rv.Elem()) + case reflect.String: + enc.writeQuoted(rv.String()) + default: + panic(e("unexpected primitive type: %s", rv.Kind())) + } +} + +// By the TOML spec, all floats must have a decimal with at least one +// number on either side. 
+func floatAddDecimal(fstr string) string { + if !strings.Contains(fstr, ".") { + return fstr + ".0" + } + return fstr +} + +func (enc *Encoder) writeQuoted(s string) { + enc.wf("\"%s\"", quotedReplacer.Replace(s)) +} + +func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { + length := rv.Len() + enc.wf("[") + for i := 0; i < length; i++ { + elem := rv.Index(i) + enc.eElement(elem) + if i != length-1 { + enc.wf(", ") + } + } + enc.wf("]") +} + +func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { + if len(key) == 0 { + encPanic(errNoKey) + } + for i := 0; i < rv.Len(); i++ { + trv := rv.Index(i) + if isNil(trv) { + continue + } + panicIfInvalidKey(key) + enc.newline() + enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll()) + enc.newline() + enc.eMapOrStruct(key, trv) + } +} + +func (enc *Encoder) eTable(key Key, rv reflect.Value) { + panicIfInvalidKey(key) + if len(key) == 1 { + // Output an extra newline between top-level tables. + // (The newline isn't written if nothing else has been written though.) + enc.newline() + } + if len(key) > 0 { + enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll()) + enc.newline() + } + enc.eMapOrStruct(key, rv) +} + +func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) { + switch rv := eindirect(rv); rv.Kind() { + case reflect.Map: + enc.eMap(key, rv) + case reflect.Struct: + enc.eStruct(key, rv) + default: + panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) + } +} + +func (enc *Encoder) eMap(key Key, rv reflect.Value) { + rt := rv.Type() + if rt.Key().Kind() != reflect.String { + encPanic(errNonString) + } + + // Sort keys so that we have deterministic output. And write keys directly + // underneath this key first, before writing sub-structs or sub-maps. + var mapKeysDirect, mapKeysSub []string + for _, mapKey := range rv.MapKeys() { + k := mapKey.String() + if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) { + mapKeysSub = append(mapKeysSub, k) + } else { + mapKeysDirect = append(mapKeysDirect, k) + } + } + + var writeMapKeys = func(mapKeys []string) { + sort.Strings(mapKeys) + for _, mapKey := range mapKeys { + mrv := rv.MapIndex(reflect.ValueOf(mapKey)) + if isNil(mrv) { + // Don't write anything for nil fields. + continue + } + enc.encode(key.add(mapKey), mrv) + } + } + writeMapKeys(mapKeysDirect) + writeMapKeys(mapKeysSub) +} + +func (enc *Encoder) eStruct(key Key, rv reflect.Value) { + // Write keys for fields directly under this key first, because if we write + // a field that creates a new table, then all keys under it will be in that + // table (not the one we're writing here). + rt := rv.Type() + var fieldsDirect, fieldsSub [][]int + var addFields func(rt reflect.Type, rv reflect.Value, start []int) + addFields = func(rt reflect.Type, rv reflect.Value, start []int) { + for i := 0; i < rt.NumField(); i++ { + f := rt.Field(i) + // skip unexported fields + if f.PkgPath != "" && !f.Anonymous { + continue + } + frv := rv.Field(i) + if f.Anonymous { + t := f.Type + switch t.Kind() { + case reflect.Struct: + // Treat anonymous struct fields with + // tag names as though they are not + // anonymous, like encoding/json does. + if getOptions(f.Tag).name == "" { + addFields(t, frv, f.Index) + continue + } + case reflect.Ptr: + if t.Elem().Kind() == reflect.Struct && + getOptions(f.Tag).name == "" { + if !frv.IsNil() { + addFields(t.Elem(), frv.Elem(), f.Index) + } + continue + } + // Fall through to the normal field encoding logic below + // for non-struct anonymous fields. 
+ } + } + + if typeIsHash(tomlTypeOfGo(frv)) { + fieldsSub = append(fieldsSub, append(start, f.Index...)) + } else { + fieldsDirect = append(fieldsDirect, append(start, f.Index...)) + } + } + } + addFields(rt, rv, nil) + + var writeFields = func(fields [][]int) { + for _, fieldIndex := range fields { + sft := rt.FieldByIndex(fieldIndex) + sf := rv.FieldByIndex(fieldIndex) + if isNil(sf) { + // Don't write anything for nil fields. + continue + } + + opts := getOptions(sft.Tag) + if opts.skip { + continue + } + keyName := sft.Name + if opts.name != "" { + keyName = opts.name + } + if opts.omitempty && isEmpty(sf) { + continue + } + if opts.omitzero && isZero(sf) { + continue + } + + enc.encode(key.add(keyName), sf) + } + } + writeFields(fieldsDirect) + writeFields(fieldsSub) +} + +// tomlTypeName returns the TOML type name of the Go value's type. It is +// used to determine whether the types of array elements are mixed (which is +// forbidden). If the Go value is nil, then it is illegal for it to be an array +// element, and valueIsNil is returned as true. + +// Returns the TOML type of a Go value. The type may be `nil`, which means +// no concrete TOML type could be found. +func tomlTypeOfGo(rv reflect.Value) tomlType { + if isNil(rv) || !rv.IsValid() { + return nil + } + switch rv.Kind() { + case reflect.Bool: + return tomlBool + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64: + return tomlInteger + case reflect.Float32, reflect.Float64: + return tomlFloat + case reflect.Array, reflect.Slice: + if typeEqual(tomlHash, tomlArrayType(rv)) { + return tomlArrayHash + } + return tomlArray + case reflect.Ptr, reflect.Interface: + return tomlTypeOfGo(rv.Elem()) + case reflect.String: + return tomlString + case reflect.Map: + return tomlHash + case reflect.Struct: + switch rv.Interface().(type) { + case time.Time: + return tomlDatetime + case TextMarshaler: + return tomlString + default: + return tomlHash + } + default: + panic("unexpected reflect.Kind: " + rv.Kind().String()) + } +} + +// tomlArrayType returns the element type of a TOML array. The type returned +// may be nil if it cannot be determined (e.g., a nil slice or a zero length +// slize). This function may also panic if it finds a type that cannot be +// expressed in TOML (such as nil elements, heterogeneous arrays or directly +// nested arrays of tables). +func tomlArrayType(rv reflect.Value) tomlType { + if isNil(rv) || !rv.IsValid() || rv.Len() == 0 { + return nil + } + firstType := tomlTypeOfGo(rv.Index(0)) + if firstType == nil { + encPanic(errArrayNilElement) + } + + rvlen := rv.Len() + for i := 1; i < rvlen; i++ { + elem := rv.Index(i) + switch elemType := tomlTypeOfGo(elem); { + case elemType == nil: + encPanic(errArrayNilElement) + case !typeEqual(firstType, elemType): + encPanic(errArrayMixedElementTypes) + } + } + // If we have a nested array, then we must make sure that the nested + // array contains ONLY primitives. + // This checks arbitrarily nested arrays. 
+ if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) { + nest := tomlArrayType(eindirect(rv.Index(0))) + if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) { + encPanic(errArrayNoTable) + } + } + return firstType +} + +type tagOptions struct { + skip bool // "-" + name string + omitempty bool + omitzero bool +} + +func getOptions(tag reflect.StructTag) tagOptions { + t := tag.Get("toml") + if t == "-" { + return tagOptions{skip: true} + } + var opts tagOptions + parts := strings.Split(t, ",") + opts.name = parts[0] + for _, s := range parts[1:] { + switch s { + case "omitempty": + opts.omitempty = true + case "omitzero": + opts.omitzero = true + } + } + return opts +} + +func isZero(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rv.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return rv.Uint() == 0 + case reflect.Float32, reflect.Float64: + return rv.Float() == 0.0 + } + return false +} + +func isEmpty(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return rv.Len() == 0 + case reflect.Bool: + return !rv.Bool() + } + return false +} + +func (enc *Encoder) newline() { + if enc.hasWritten { + enc.wf("\n") + } +} + +func (enc *Encoder) keyEqElement(key Key, val reflect.Value) { + if len(key) == 0 { + encPanic(errNoKey) + } + panicIfInvalidKey(key) + enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) + enc.eElement(val) + enc.newline() +} + +func (enc *Encoder) wf(format string, v ...interface{}) { + if _, err := fmt.Fprintf(enc.w, format, v...); err != nil { + encPanic(err) + } + enc.hasWritten = true +} + +func (enc *Encoder) indentStr(key Key) string { + return strings.Repeat(enc.Indent, len(key)-1) +} + +func encPanic(err error) { + panic(tomlEncodeError{err}) +} + +func eindirect(v reflect.Value) reflect.Value { + switch v.Kind() { + case reflect.Ptr, reflect.Interface: + return eindirect(v.Elem()) + default: + return v + } +} + +func isNil(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return rv.IsNil() + default: + return false + } +} + +func panicIfInvalidKey(key Key) { + for _, k := range key { + if len(k) == 0 { + encPanic(e("Key '%s' is not a valid table name. Key names "+ + "cannot be empty.", key.maybeQuotedAll())) + } + } +} + +func isValidKeyName(s string) bool { + return len(s) != 0 +} diff --git a/vendor/github.com/BurntSushi/toml/encoding_types.go b/vendor/github.com/BurntSushi/toml/encoding_types.go new file mode 100644 index 0000000..d36e1dd --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/encoding_types.go @@ -0,0 +1,19 @@ +// +build go1.2 + +package toml + +// In order to support Go 1.1, we define our own TextMarshaler and +// TextUnmarshaler types. For Go 1.2+, we just alias them with the +// standard library interfaces. + +import ( + "encoding" +) + +// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here +// so that Go 1.1 can be supported. +type TextMarshaler encoding.TextMarshaler + +// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined +// here so that Go 1.1 can be supported. 
+type TextUnmarshaler encoding.TextUnmarshaler diff --git a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go new file mode 100644 index 0000000..e8d503d --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go @@ -0,0 +1,18 @@ +// +build !go1.2 + +package toml + +// These interfaces were introduced in Go 1.2, so we add them manually when +// compiling for Go 1.1. + +// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here +// so that Go 1.1 can be supported. +type TextMarshaler interface { + MarshalText() (text []byte, err error) +} + +// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined +// here so that Go 1.1 can be supported. +type TextUnmarshaler interface { + UnmarshalText(text []byte) error +} diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go new file mode 100644 index 0000000..e0a742a --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -0,0 +1,953 @@ +package toml + +import ( + "fmt" + "strings" + "unicode" + "unicode/utf8" +) + +type itemType int + +const ( + itemError itemType = iota + itemNIL // used in the parser to indicate no type + itemEOF + itemText + itemString + itemRawString + itemMultilineString + itemRawMultilineString + itemBool + itemInteger + itemFloat + itemDatetime + itemArray // the start of an array + itemArrayEnd + itemTableStart + itemTableEnd + itemArrayTableStart + itemArrayTableEnd + itemKeyStart + itemCommentStart + itemInlineTableStart + itemInlineTableEnd +) + +const ( + eof = 0 + comma = ',' + tableStart = '[' + tableEnd = ']' + arrayTableStart = '[' + arrayTableEnd = ']' + tableSep = '.' + keySep = '=' + arrayStart = '[' + arrayEnd = ']' + commentStart = '#' + stringStart = '"' + stringEnd = '"' + rawStringStart = '\'' + rawStringEnd = '\'' + inlineTableStart = '{' + inlineTableEnd = '}' +) + +type stateFn func(lx *lexer) stateFn + +type lexer struct { + input string + start int + pos int + line int + state stateFn + items chan item + + // Allow for backing up up to three runes. + // This is necessary because TOML contains 3-rune tokens (""" and '''). + prevWidths [3]int + nprev int // how many of prevWidths are in use + // If we emit an eof, we can still back up, but it is not OK to call + // next again. + atEOF bool + + // A stack of state functions used to maintain context. + // The idea is to reuse parts of the state machine in various places. + // For example, values can appear at the top level or within arbitrarily + // nested arrays. The last state on the stack is used after a value has + // been lexed. Similarly for comments. 
+ stack []stateFn +} + +type item struct { + typ itemType + val string + line int +} + +func (lx *lexer) nextItem() item { + for { + select { + case item := <-lx.items: + return item + default: + lx.state = lx.state(lx) + } + } +} + +func lex(input string) *lexer { + lx := &lexer{ + input: input, + state: lexTop, + line: 1, + items: make(chan item, 10), + stack: make([]stateFn, 0, 10), + } + return lx +} + +func (lx *lexer) push(state stateFn) { + lx.stack = append(lx.stack, state) +} + +func (lx *lexer) pop() stateFn { + if len(lx.stack) == 0 { + return lx.errorf("BUG in lexer: no states to pop") + } + last := lx.stack[len(lx.stack)-1] + lx.stack = lx.stack[0 : len(lx.stack)-1] + return last +} + +func (lx *lexer) current() string { + return lx.input[lx.start:lx.pos] +} + +func (lx *lexer) emit(typ itemType) { + lx.items <- item{typ, lx.current(), lx.line} + lx.start = lx.pos +} + +func (lx *lexer) emitTrim(typ itemType) { + lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line} + lx.start = lx.pos +} + +func (lx *lexer) next() (r rune) { + if lx.atEOF { + panic("next called after EOF") + } + if lx.pos >= len(lx.input) { + lx.atEOF = true + return eof + } + + if lx.input[lx.pos] == '\n' { + lx.line++ + } + lx.prevWidths[2] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[0] + if lx.nprev < 3 { + lx.nprev++ + } + r, w := utf8.DecodeRuneInString(lx.input[lx.pos:]) + lx.prevWidths[0] = w + lx.pos += w + return r +} + +// ignore skips over the pending input before this point. +func (lx *lexer) ignore() { + lx.start = lx.pos +} + +// backup steps back one rune. Can be called only twice between calls to next. +func (lx *lexer) backup() { + if lx.atEOF { + lx.atEOF = false + return + } + if lx.nprev < 1 { + panic("backed up too far") + } + w := lx.prevWidths[0] + lx.prevWidths[0] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[2] + lx.nprev-- + lx.pos -= w + if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { + lx.line-- + } +} + +// accept consumes the next rune if it's equal to `valid`. +func (lx *lexer) accept(valid rune) bool { + if lx.next() == valid { + return true + } + lx.backup() + return false +} + +// peek returns but does not consume the next rune in the input. +func (lx *lexer) peek() rune { + r := lx.next() + lx.backup() + return r +} + +// skip ignores all input that matches the given predicate. +func (lx *lexer) skip(pred func(rune) bool) { + for { + r := lx.next() + if pred(r) { + continue + } + lx.backup() + lx.ignore() + return + } +} + +// errorf stops all lexing by emitting an error and returning `nil`. +// Note that any value that is a character is escaped if it's a special +// character (newlines, tabs, etc.). +func (lx *lexer) errorf(format string, values ...interface{}) stateFn { + lx.items <- item{ + itemError, + fmt.Sprintf(format, values...), + lx.line, + } + return nil +} + +// lexTop consumes elements at the top level of TOML data. +func lexTop(lx *lexer) stateFn { + r := lx.next() + if isWhitespace(r) || isNL(r) { + return lexSkip(lx, lexTop) + } + switch r { + case commentStart: + lx.push(lexTop) + return lexCommentStart + case tableStart: + return lexTableStart + case eof: + if lx.pos > lx.start { + return lx.errorf("unexpected EOF") + } + lx.emit(itemEOF) + return nil + } + + // At this point, the only valid item can be a key, so we back up + // and let the key lexer do the rest. + lx.backup() + lx.push(lexTopEnd) + return lexKeyStart +} + +// lexTopEnd is entered whenever a top-level item has been consumed. (A value +// or a table.) 
It must see only whitespace, and will turn back to lexTop +// upon a newline. If it sees EOF, it will quit the lexer successfully. +func lexTopEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case r == commentStart: + // a comment will read to a newline for us. + lx.push(lexTop) + return lexCommentStart + case isWhitespace(r): + return lexTopEnd + case isNL(r): + lx.ignore() + return lexTop + case r == eof: + lx.emit(itemEOF) + return nil + } + return lx.errorf("expected a top-level item to end with a newline, "+ + "comment, or EOF, but got %q instead", r) +} + +// lexTable lexes the beginning of a table. Namely, it makes sure that +// it starts with a character other than '.' and ']'. +// It assumes that '[' has already been consumed. +// It also handles the case that this is an item in an array of tables. +// e.g., '[[name]]'. +func lexTableStart(lx *lexer) stateFn { + if lx.peek() == arrayTableStart { + lx.next() + lx.emit(itemArrayTableStart) + lx.push(lexArrayTableEnd) + } else { + lx.emit(itemTableStart) + lx.push(lexTableEnd) + } + return lexTableNameStart +} + +func lexTableEnd(lx *lexer) stateFn { + lx.emit(itemTableEnd) + return lexTopEnd +} + +func lexArrayTableEnd(lx *lexer) stateFn { + if r := lx.next(); r != arrayTableEnd { + return lx.errorf("expected end of table array name delimiter %q, "+ + "but got %q instead", arrayTableEnd, r) + } + lx.emit(itemArrayTableEnd) + return lexTopEnd +} + +func lexTableNameStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == tableEnd || r == eof: + return lx.errorf("unexpected end of table name " + + "(table names cannot be empty)") + case r == tableSep: + return lx.errorf("unexpected table separator " + + "(table names cannot be empty)") + case r == stringStart || r == rawStringStart: + lx.ignore() + lx.push(lexTableNameEnd) + return lexValue // reuse string lexing + default: + return lexBareTableName + } +} + +// lexBareTableName lexes the name of a table. It assumes that at least one +// valid character for the table has already been read. +func lexBareTableName(lx *lexer) stateFn { + r := lx.next() + if isBareKeyChar(r) { + return lexBareTableName + } + lx.backup() + lx.emit(itemText) + return lexTableNameEnd +} + +// lexTableNameEnd reads the end of a piece of a table name, optionally +// consuming whitespace. +func lexTableNameEnd(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.next(); { + case isWhitespace(r): + return lexTableNameEnd + case r == tableSep: + lx.ignore() + return lexTableNameStart + case r == tableEnd: + return lx.pop() + default: + return lx.errorf("expected '.' or ']' to end table name, "+ + "but got %q instead", r) + } +} + +// lexKeyStart consumes a key name up until the first non-whitespace character. +// lexKeyStart will ignore whitespace. +func lexKeyStart(lx *lexer) stateFn { + r := lx.peek() + switch { + case r == keySep: + return lx.errorf("unexpected key separator %q", keySep) + case isWhitespace(r) || isNL(r): + lx.next() + return lexSkip(lx, lexKeyStart) + case r == stringStart || r == rawStringStart: + lx.ignore() + lx.emit(itemKeyStart) + lx.push(lexKeyEnd) + return lexValue // reuse string lexing + default: + lx.ignore() + lx.emit(itemKeyStart) + return lexBareKey + } +} + +// lexBareKey consumes the text of a bare key. Assumes that the first character +// (which is not whitespace) has not yet been consumed. 
+func lexBareKey(lx *lexer) stateFn { + switch r := lx.next(); { + case isBareKeyChar(r): + return lexBareKey + case isWhitespace(r): + lx.backup() + lx.emit(itemText) + return lexKeyEnd + case r == keySep: + lx.backup() + lx.emit(itemText) + return lexKeyEnd + default: + return lx.errorf("bare keys cannot contain %q", r) + } +} + +// lexKeyEnd consumes the end of a key and trims whitespace (up to the key +// separator). +func lexKeyEnd(lx *lexer) stateFn { + switch r := lx.next(); { + case r == keySep: + return lexSkip(lx, lexValue) + case isWhitespace(r): + return lexSkip(lx, lexKeyEnd) + default: + return lx.errorf("expected key separator %q, but got %q instead", + keySep, r) + } +} + +// lexValue starts the consumption of a value anywhere a value is expected. +// lexValue will ignore whitespace. +// After a value is lexed, the last state on the next is popped and returned. +func lexValue(lx *lexer) stateFn { + // We allow whitespace to precede a value, but NOT newlines. + // In array syntax, the array states are responsible for ignoring newlines. + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexValue) + case isDigit(r): + lx.backup() // avoid an extra state and use the same as above + return lexNumberOrDateStart + } + switch r { + case arrayStart: + lx.ignore() + lx.emit(itemArray) + return lexArrayValue + case inlineTableStart: + lx.ignore() + lx.emit(itemInlineTableStart) + return lexInlineTableValue + case stringStart: + if lx.accept(stringStart) { + if lx.accept(stringStart) { + lx.ignore() // Ignore """ + return lexMultilineString + } + lx.backup() + } + lx.ignore() // ignore the '"' + return lexString + case rawStringStart: + if lx.accept(rawStringStart) { + if lx.accept(rawStringStart) { + lx.ignore() // Ignore """ + return lexMultilineRawString + } + lx.backup() + } + lx.ignore() // ignore the "'" + return lexRawString + case '+', '-': + return lexNumberStart + case '.': // special error case, be kind to users + return lx.errorf("floats must start with a digit, not '.'") + } + if unicode.IsLetter(r) { + // Be permissive here; lexBool will give a nice error if the + // user wrote something like + // x = foo + // (i.e. not 'true' or 'false' but is something else word-like.) + lx.backup() + return lexBool + } + return lx.errorf("expected value but found %q instead", r) +} + +// lexArrayValue consumes one value in an array. It assumes that '[' or ',' +// have already been consumed. All whitespace and newlines are ignored. +func lexArrayValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValue) + case r == commentStart: + lx.push(lexArrayValue) + return lexCommentStart + case r == comma: + return lx.errorf("unexpected comma") + case r == arrayEnd: + // NOTE(caleb): The spec isn't clear about whether you can have + // a trailing comma or not, so we'll allow it. + return lexArrayEnd + } + + lx.backup() + lx.push(lexArrayValueEnd) + return lexValue +} + +// lexArrayValueEnd consumes everything between the end of an array value and +// the next value (or the end of the array): it ignores whitespace and newlines +// and expects either a ',' or a ']'. 
+func lexArrayValueEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValueEnd) + case r == commentStart: + lx.push(lexArrayValueEnd) + return lexCommentStart + case r == comma: + lx.ignore() + return lexArrayValue // move on to the next value + case r == arrayEnd: + return lexArrayEnd + } + return lx.errorf( + "expected a comma or array terminator %q, but got %q instead", + arrayEnd, r, + ) +} + +// lexArrayEnd finishes the lexing of an array. +// It assumes that a ']' has just been consumed. +func lexArrayEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemArrayEnd) + return lx.pop() +} + +// lexInlineTableValue consumes one key/value pair in an inline table. +// It assumes that '{' or ',' have already been consumed. Whitespace is ignored. +func lexInlineTableValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValue) + case isNL(r): + return lx.errorf("newlines not allowed within inline tables") + case r == commentStart: + lx.push(lexInlineTableValue) + return lexCommentStart + case r == comma: + return lx.errorf("unexpected comma") + case r == inlineTableEnd: + return lexInlineTableEnd + } + lx.backup() + lx.push(lexInlineTableValueEnd) + return lexKeyStart +} + +// lexInlineTableValueEnd consumes everything between the end of an inline table +// key/value pair and the next pair (or the end of the table): +// it ignores whitespace and expects either a ',' or a '}'. +func lexInlineTableValueEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValueEnd) + case isNL(r): + return lx.errorf("newlines not allowed within inline tables") + case r == commentStart: + lx.push(lexInlineTableValueEnd) + return lexCommentStart + case r == comma: + lx.ignore() + return lexInlineTableValue + case r == inlineTableEnd: + return lexInlineTableEnd + } + return lx.errorf("expected a comma or an inline table terminator %q, "+ + "but got %q instead", inlineTableEnd, r) +} + +// lexInlineTableEnd finishes the lexing of an inline table. +// It assumes that a '}' has just been consumed. +func lexInlineTableEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemInlineTableEnd) + return lx.pop() +} + +// lexString consumes the inner contents of a string. It assumes that the +// beginning '"' has already been consumed and ignored. +func lexString(lx *lexer) stateFn { + r := lx.next() + switch { + case r == eof: + return lx.errorf("unexpected EOF") + case isNL(r): + return lx.errorf("strings cannot contain newlines") + case r == '\\': + lx.push(lexString) + return lexStringEscape + case r == stringEnd: + lx.backup() + lx.emit(itemString) + lx.next() + lx.ignore() + return lx.pop() + } + return lexString +} + +// lexMultilineString consumes the inner contents of a string. It assumes that +// the beginning '"""' has already been consumed and ignored. +func lexMultilineString(lx *lexer) stateFn { + switch lx.next() { + case eof: + return lx.errorf("unexpected EOF") + case '\\': + return lexMultilineStringEscape + case stringEnd: + if lx.accept(stringEnd) { + if lx.accept(stringEnd) { + lx.backup() + lx.backup() + lx.backup() + lx.emit(itemMultilineString) + lx.next() + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + } + return lexMultilineString +} + +// lexRawString consumes a raw string. Nothing can be escaped in such a string. +// It assumes that the beginning "'" has already been consumed and ignored. 
+func lexRawString(lx *lexer) stateFn { + r := lx.next() + switch { + case r == eof: + return lx.errorf("unexpected EOF") + case isNL(r): + return lx.errorf("strings cannot contain newlines") + case r == rawStringEnd: + lx.backup() + lx.emit(itemRawString) + lx.next() + lx.ignore() + return lx.pop() + } + return lexRawString +} + +// lexMultilineRawString consumes a raw string. Nothing can be escaped in such +// a string. It assumes that the beginning "'''" has already been consumed and +// ignored. +func lexMultilineRawString(lx *lexer) stateFn { + switch lx.next() { + case eof: + return lx.errorf("unexpected EOF") + case rawStringEnd: + if lx.accept(rawStringEnd) { + if lx.accept(rawStringEnd) { + lx.backup() + lx.backup() + lx.backup() + lx.emit(itemRawMultilineString) + lx.next() + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + } + return lexMultilineRawString +} + +// lexMultilineStringEscape consumes an escaped character. It assumes that the +// preceding '\\' has already been consumed. +func lexMultilineStringEscape(lx *lexer) stateFn { + // Handle the special case first: + if isNL(lx.next()) { + return lexMultilineString + } + lx.backup() + lx.push(lexMultilineString) + return lexStringEscape(lx) +} + +func lexStringEscape(lx *lexer) stateFn { + r := lx.next() + switch r { + case 'b': + fallthrough + case 't': + fallthrough + case 'n': + fallthrough + case 'f': + fallthrough + case 'r': + fallthrough + case '"': + fallthrough + case '\\': + return lx.pop() + case 'u': + return lexShortUnicodeEscape + case 'U': + return lexLongUnicodeEscape + } + return lx.errorf("invalid escape character %q; only the following "+ + "escape characters are allowed: "+ + `\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r) +} + +func lexShortUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 4; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf(`expected four hexadecimal digits after '\u', `+ + "but got %q instead", lx.current()) + } + } + return lx.pop() +} + +func lexLongUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 8; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf(`expected eight hexadecimal digits after '\U', `+ + "but got %q instead", lx.current()) + } + } + return lx.pop() +} + +// lexNumberOrDateStart consumes either an integer, a float, or datetime. +func lexNumberOrDateStart(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '_': + return lexNumber + case 'e', 'E': + return lexFloat + case '.': + return lx.errorf("floats must start with a digit, not '.'") + } + return lx.errorf("expected a digit but got %q", r) +} + +// lexNumberOrDate consumes either an integer, float or datetime. +func lexNumberOrDate(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '-': + return lexDatetime + case '_': + return lexNumber + case '.', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDatetime consumes a Datetime, to a first approximation. +// The parser validates that it matches one of the accepted formats. +func lexDatetime(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexDatetime + } + switch r { + case '-', 'T', ':', '.', 'Z', '+': + return lexDatetime + } + + lx.backup() + lx.emit(itemDatetime) + return lx.pop() +} + +// lexNumberStart consumes either an integer or a float. 
It assumes that a sign +// has already been read, but that *no* digits have been consumed. +// lexNumberStart will move to the appropriate integer or float states. +func lexNumberStart(lx *lexer) stateFn { + // We MUST see a digit. Even floats have to start with a digit. + r := lx.next() + if !isDigit(r) { + if r == '.' { + return lx.errorf("floats must start with a digit, not '.'") + } + return lx.errorf("expected a digit but got %q", r) + } + return lexNumber +} + +// lexNumber consumes an integer or a float after seeing the first digit. +func lexNumber(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumber + } + switch r { + case '_': + return lexNumber + case '.', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexFloat consumes the elements of a float. It allows any sequence of +// float-like characters, so floats emitted by the lexer are only a first +// approximation and must be validated by the parser. +func lexFloat(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexFloat + } + switch r { + case '_', '.', '-', '+', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemFloat) + return lx.pop() +} + +// lexBool consumes a bool string: 'true' or 'false. +func lexBool(lx *lexer) stateFn { + var rs []rune + for { + r := lx.next() + if !unicode.IsLetter(r) { + lx.backup() + break + } + rs = append(rs, r) + } + s := string(rs) + switch s { + case "true", "false": + lx.emit(itemBool) + return lx.pop() + } + return lx.errorf("expected value but found %q instead", s) +} + +// lexCommentStart begins the lexing of a comment. It will emit +// itemCommentStart and consume no characters, passing control to lexComment. +func lexCommentStart(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemCommentStart) + return lexComment +} + +// lexComment lexes an entire comment. It assumes that '#' has been consumed. +// It will consume *up to* the first newline character, and pass control +// back to the last state on the stack. +func lexComment(lx *lexer) stateFn { + r := lx.peek() + if isNL(r) || r == eof { + lx.emit(itemText) + return lx.pop() + } + lx.next() + return lexComment +} + +// lexSkip ignores all slurped input and moves on to the next state. +func lexSkip(lx *lexer, nextState stateFn) stateFn { + return func(lx *lexer) stateFn { + lx.ignore() + return nextState + } +} + +// isWhitespace returns true if `r` is a whitespace character according +// to the spec. 
+func isWhitespace(r rune) bool { + return r == '\t' || r == ' ' +} + +func isNL(r rune) bool { + return r == '\n' || r == '\r' +} + +func isDigit(r rune) bool { + return r >= '0' && r <= '9' +} + +func isHexadecimal(r rune) bool { + return (r >= '0' && r <= '9') || + (r >= 'a' && r <= 'f') || + (r >= 'A' && r <= 'F') +} + +func isBareKeyChar(r rune) bool { + return (r >= 'A' && r <= 'Z') || + (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || + r == '_' || + r == '-' +} + +func (itype itemType) String() string { + switch itype { + case itemError: + return "Error" + case itemNIL: + return "NIL" + case itemEOF: + return "EOF" + case itemText: + return "Text" + case itemString, itemRawString, itemMultilineString, itemRawMultilineString: + return "String" + case itemBool: + return "Bool" + case itemInteger: + return "Integer" + case itemFloat: + return "Float" + case itemDatetime: + return "DateTime" + case itemTableStart: + return "TableStart" + case itemTableEnd: + return "TableEnd" + case itemKeyStart: + return "KeyStart" + case itemArray: + return "Array" + case itemArrayEnd: + return "ArrayEnd" + case itemCommentStart: + return "CommentStart" + } + panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) +} + +func (item item) String() string { + return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) +} diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go new file mode 100644 index 0000000..50869ef --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -0,0 +1,592 @@ +package toml + +import ( + "fmt" + "strconv" + "strings" + "time" + "unicode" + "unicode/utf8" +) + +type parser struct { + mapping map[string]interface{} + types map[string]tomlType + lx *lexer + + // A list of keys in the order that they appear in the TOML data. + ordered []Key + + // the full key for the current hash in scope + context Key + + // the base key name for everything except hashes + currentKey string + + // rough approximation of line number + approxLine int + + // A map of 'key.group.names' to whether they were created implicitly. 
+ implicits map[string]bool +} + +type parseError string + +func (pe parseError) Error() string { + return string(pe) +} + +func parse(data string) (p *parser, err error) { + defer func() { + if r := recover(); r != nil { + var ok bool + if err, ok = r.(parseError); ok { + return + } + panic(r) + } + }() + + p = &parser{ + mapping: make(map[string]interface{}), + types: make(map[string]tomlType), + lx: lex(data), + ordered: make([]Key, 0), + implicits: make(map[string]bool), + } + for { + item := p.next() + if item.typ == itemEOF { + break + } + p.topLevel(item) + } + + return p, nil +} + +func (p *parser) panicf(format string, v ...interface{}) { + msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s", + p.approxLine, p.current(), fmt.Sprintf(format, v...)) + panic(parseError(msg)) +} + +func (p *parser) next() item { + it := p.lx.nextItem() + if it.typ == itemError { + p.panicf("%s", it.val) + } + return it +} + +func (p *parser) bug(format string, v ...interface{}) { + panic(fmt.Sprintf("BUG: "+format+"\n\n", v...)) +} + +func (p *parser) expect(typ itemType) item { + it := p.next() + p.assertEqual(typ, it.typ) + return it +} + +func (p *parser) assertEqual(expected, got itemType) { + if expected != got { + p.bug("Expected '%s' but got '%s'.", expected, got) + } +} + +func (p *parser) topLevel(item item) { + switch item.typ { + case itemCommentStart: + p.approxLine = item.line + p.expect(itemText) + case itemTableStart: + kg := p.next() + p.approxLine = kg.line + + var key Key + for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() { + key = append(key, p.keyString(kg)) + } + p.assertEqual(itemTableEnd, kg.typ) + + p.establishContext(key, false) + p.setType("", tomlHash) + p.ordered = append(p.ordered, key) + case itemArrayTableStart: + kg := p.next() + p.approxLine = kg.line + + var key Key + for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() { + key = append(key, p.keyString(kg)) + } + p.assertEqual(itemArrayTableEnd, kg.typ) + + p.establishContext(key, true) + p.setType("", tomlArrayHash) + p.ordered = append(p.ordered, key) + case itemKeyStart: + kname := p.next() + p.approxLine = kname.line + p.currentKey = p.keyString(kname) + + val, typ := p.value(p.next()) + p.setValue(p.currentKey, val) + p.setType(p.currentKey, typ) + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + p.currentKey = "" + default: + p.bug("Unexpected type at top level: %s", item.typ) + } +} + +// Gets a string for a key (or part of a key in a table name). +func (p *parser) keyString(it item) string { + switch it.typ { + case itemText: + return it.val + case itemString, itemMultilineString, + itemRawString, itemRawMultilineString: + s, _ := p.value(it) + return s.(string) + default: + p.bug("Unexpected key type: %s", it.typ) + panic("unreachable") + } +} + +// value translates an expected value from the lexer into a Go value wrapped +// as an empty interface. 
+func (p *parser) value(it item) (interface{}, tomlType) { + switch it.typ { + case itemString: + return p.replaceEscapes(it.val), p.typeOfPrimitive(it) + case itemMultilineString: + trimmed := stripFirstNewline(stripEscapedWhitespace(it.val)) + return p.replaceEscapes(trimmed), p.typeOfPrimitive(it) + case itemRawString: + return it.val, p.typeOfPrimitive(it) + case itemRawMultilineString: + return stripFirstNewline(it.val), p.typeOfPrimitive(it) + case itemBool: + switch it.val { + case "true": + return true, p.typeOfPrimitive(it) + case "false": + return false, p.typeOfPrimitive(it) + } + p.bug("Expected boolean value, but got '%s'.", it.val) + case itemInteger: + if !numUnderscoresOK(it.val) { + p.panicf("Invalid integer %q: underscores must be surrounded by digits", + it.val) + } + val := strings.Replace(it.val, "_", "", -1) + num, err := strconv.ParseInt(val, 10, 64) + if err != nil { + // Distinguish integer values. Normally, it'd be a bug if the lexer + // provides an invalid integer, but it's possible that the number is + // out of range of valid values (which the lexer cannot determine). + // So mark the former as a bug but the latter as a legitimate user + // error. + if e, ok := err.(*strconv.NumError); ok && + e.Err == strconv.ErrRange { + + p.panicf("Integer '%s' is out of the range of 64-bit "+ + "signed integers.", it.val) + } else { + p.bug("Expected integer value, but got '%s'.", it.val) + } + } + return num, p.typeOfPrimitive(it) + case itemFloat: + parts := strings.FieldsFunc(it.val, func(r rune) bool { + switch r { + case '.', 'e', 'E': + return true + } + return false + }) + for _, part := range parts { + if !numUnderscoresOK(part) { + p.panicf("Invalid float %q: underscores must be "+ + "surrounded by digits", it.val) + } + } + if !numPeriodsOK(it.val) { + // As a special case, numbers like '123.' or '1.e2', + // which are valid as far as Go/strconv are concerned, + // must be rejected because TOML says that a fractional + // part consists of '.' followed by 1+ digits. + p.panicf("Invalid float %q: '.' 
must be followed "+ + "by one or more digits", it.val) + } + val := strings.Replace(it.val, "_", "", -1) + num, err := strconv.ParseFloat(val, 64) + if err != nil { + if e, ok := err.(*strconv.NumError); ok && + e.Err == strconv.ErrRange { + + p.panicf("Float '%s' is out of the range of 64-bit "+ + "IEEE-754 floating-point numbers.", it.val) + } else { + p.panicf("Invalid float value: %q", it.val) + } + } + return num, p.typeOfPrimitive(it) + case itemDatetime: + var t time.Time + var ok bool + var err error + for _, format := range []string{ + "2006-01-02T15:04:05Z07:00", + "2006-01-02T15:04:05", + "2006-01-02", + } { + t, err = time.ParseInLocation(format, it.val, time.Local) + if err == nil { + ok = true + break + } + } + if !ok { + p.panicf("Invalid TOML Datetime: %q.", it.val) + } + return t, p.typeOfPrimitive(it) + case itemArray: + array := make([]interface{}, 0) + types := make([]tomlType, 0) + + for it = p.next(); it.typ != itemArrayEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + val, typ := p.value(it) + array = append(array, val) + types = append(types, typ) + } + return array, p.typeOfArray(types) + case itemInlineTableStart: + var ( + hash = make(map[string]interface{}) + outerContext = p.context + outerKey = p.currentKey + ) + + p.context = append(p.context, p.currentKey) + p.currentKey = "" + for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() { + if it.typ != itemKeyStart { + p.bug("Expected key start but instead found %q, around line %d", + it.val, p.approxLine) + } + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + // retrieve key + k := p.next() + p.approxLine = k.line + kname := p.keyString(k) + + // retrieve value + p.currentKey = kname + val, typ := p.value(p.next()) + // make sure we keep metadata up to date + p.setType(kname, typ) + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + hash[kname] = val + } + p.context = outerContext + p.currentKey = outerKey + return hash, tomlHash + } + p.bug("Unexpected value type: %s", it.typ) + panic("unreachable") +} + +// numUnderscoresOK checks whether each underscore in s is surrounded by +// characters that are not underscores. +func numUnderscoresOK(s string) bool { + accept := false + for _, r := range s { + if r == '_' { + if !accept { + return false + } + accept = false + continue + } + accept = true + } + return accept +} + +// numPeriodsOK checks whether every period in s is followed by a digit. +func numPeriodsOK(s string) bool { + period := false + for _, r := range s { + if period && !isDigit(r) { + return false + } + period = r == '.' + } + return !period +} + +// establishContext sets the current context of the parser, +// where the context is either a hash or an array of hashes. Which one is +// set depends on the value of the `array` parameter. +// +// Establishing the context also makes sure that the key isn't a duplicate, and +// will create implicit hashes automatically. +func (p *parser) establishContext(key Key, array bool) { + var ok bool + + // Always start at the top level and drill down for our context. + hashContext := p.mapping + keyContext := make(Key, 0) + + // We only need implicit hashes for key[0:-1] + for _, k := range key[0 : len(key)-1] { + _, ok = hashContext[k] + keyContext = append(keyContext, k) + + // No key? Make an implicit hash and move on. 
+ if !ok { + p.addImplicit(keyContext) + hashContext[k] = make(map[string]interface{}) + } + + // If the hash context is actually an array of tables, then set + // the hash context to the last element in that array. + // + // Otherwise, it better be a table, since this MUST be a key group (by + // virtue of it not being the last element in a key). + switch t := hashContext[k].(type) { + case []map[string]interface{}: + hashContext = t[len(t)-1] + case map[string]interface{}: + hashContext = t + default: + p.panicf("Key '%s' was already created as a hash.", keyContext) + } + } + + p.context = keyContext + if array { + // If this is the first element for this array, then allocate a new + // list of tables for it. + k := key[len(key)-1] + if _, ok := hashContext[k]; !ok { + hashContext[k] = make([]map[string]interface{}, 0, 5) + } + + // Add a new table. But make sure the key hasn't already been used + // for something else. + if hash, ok := hashContext[k].([]map[string]interface{}); ok { + hashContext[k] = append(hash, make(map[string]interface{})) + } else { + p.panicf("Key '%s' was already created and cannot be used as "+ + "an array.", keyContext) + } + } else { + p.setValue(key[len(key)-1], make(map[string]interface{})) + } + p.context = append(p.context, key[len(key)-1]) +} + +// setValue sets the given key to the given value in the current context. +// It will make sure that the key hasn't already been defined, account for +// implicit key groups. +func (p *parser) setValue(key string, value interface{}) { + var tmpHash interface{} + var ok bool + + hash := p.mapping + keyContext := make(Key, 0) + for _, k := range p.context { + keyContext = append(keyContext, k) + if tmpHash, ok = hash[k]; !ok { + p.bug("Context for key '%s' has not been established.", keyContext) + } + switch t := tmpHash.(type) { + case []map[string]interface{}: + // The context is a table of hashes. Pick the most recent table + // defined as the current hash. + hash = t[len(t)-1] + case map[string]interface{}: + hash = t + default: + p.bug("Expected hash to have type 'map[string]interface{}', but "+ + "it has '%T' instead.", tmpHash) + } + } + keyContext = append(keyContext, key) + + if _, ok := hash[key]; ok { + // Typically, if the given key has already been set, then we have + // to raise an error since duplicate keys are disallowed. However, + // it's possible that a key was previously defined implicitly. In this + // case, it is allowed to be redefined concretely. (See the + // `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.) + // + // But we have to make sure to stop marking it as an implicit. (So that + // another redefinition provokes an error.) + // + // Note that since it has already been defined (as a hash), we don't + // want to overwrite it. So our business is done. + if p.isImplicit(keyContext) { + p.removeImplicit(keyContext) + return + } + + // Otherwise, we have a concrete key trying to override a previous + // key, which is *always* wrong. + p.panicf("Key '%s' has already been defined.", keyContext) + } + hash[key] = value +} + +// setType sets the type of a particular value at a given key. +// It should be called immediately AFTER setValue. +// +// Note that if `key` is empty, then the type given will be applied to the +// current context (which is either a table or an array of tables). 
+func (p *parser) setType(key string, typ tomlType) { + keyContext := make(Key, 0, len(p.context)+1) + for _, k := range p.context { + keyContext = append(keyContext, k) + } + if len(key) > 0 { // allow type setting for hashes + keyContext = append(keyContext, key) + } + p.types[keyContext.String()] = typ +} + +// addImplicit sets the given Key as having been created implicitly. +func (p *parser) addImplicit(key Key) { + p.implicits[key.String()] = true +} + +// removeImplicit stops tagging the given key as having been implicitly +// created. +func (p *parser) removeImplicit(key Key) { + p.implicits[key.String()] = false +} + +// isImplicit returns true if the key group pointed to by the key was created +// implicitly. +func (p *parser) isImplicit(key Key) bool { + return p.implicits[key.String()] +} + +// current returns the full key name of the current context. +func (p *parser) current() string { + if len(p.currentKey) == 0 { + return p.context.String() + } + if len(p.context) == 0 { + return p.currentKey + } + return fmt.Sprintf("%s.%s", p.context, p.currentKey) +} + +func stripFirstNewline(s string) string { + if len(s) == 0 || s[0] != '\n' { + return s + } + return s[1:] +} + +func stripEscapedWhitespace(s string) string { + esc := strings.Split(s, "\\\n") + if len(esc) > 1 { + for i := 1; i < len(esc); i++ { + esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace) + } + } + return strings.Join(esc, "") +} + +func (p *parser) replaceEscapes(str string) string { + var replaced []rune + s := []byte(str) + r := 0 + for r < len(s) { + if s[r] != '\\' { + c, size := utf8.DecodeRune(s[r:]) + r += size + replaced = append(replaced, c) + continue + } + r += 1 + if r >= len(s) { + p.bug("Escape sequence at end of string.") + return "" + } + switch s[r] { + default: + p.bug("Expected valid escape code after \\, but got %q.", s[r]) + return "" + case 'b': + replaced = append(replaced, rune(0x0008)) + r += 1 + case 't': + replaced = append(replaced, rune(0x0009)) + r += 1 + case 'n': + replaced = append(replaced, rune(0x000A)) + r += 1 + case 'f': + replaced = append(replaced, rune(0x000C)) + r += 1 + case 'r': + replaced = append(replaced, rune(0x000D)) + r += 1 + case '"': + replaced = append(replaced, rune(0x0022)) + r += 1 + case '\\': + replaced = append(replaced, rune(0x005C)) + r += 1 + case 'u': + // At this point, we know we have a Unicode escape of the form + // `uXXXX` at [r, r+5). (Because the lexer guarantees this + // for us.) + escaped := p.asciiEscapeToUnicode(s[r+1 : r+5]) + replaced = append(replaced, escaped) + r += 5 + case 'U': + // At this point, we know we have a Unicode escape of the form + // `uXXXX` at [r, r+9). (Because the lexer guarantees this + // for us.) 
+ escaped := p.asciiEscapeToUnicode(s[r+1 : r+9]) + replaced = append(replaced, escaped) + r += 9 + } + } + return string(replaced) +} + +func (p *parser) asciiEscapeToUnicode(bs []byte) rune { + s := string(bs) + hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) + if err != nil { + p.bug("Could not parse '%s' as a hexadecimal number, but the "+ + "lexer claims it's OK: %s", s, err) + } + if !utf8.ValidRune(rune(hex)) { + p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s) + } + return rune(hex) +} + +func isStringType(ty itemType) bool { + return ty == itemString || ty == itemMultilineString || + ty == itemRawString || ty == itemRawMultilineString +} diff --git a/vendor/github.com/BurntSushi/toml/session.vim b/vendor/github.com/BurntSushi/toml/session.vim new file mode 100644 index 0000000..562164b --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/session.vim @@ -0,0 +1 @@ +au BufWritePost *.go silent!make tags > /dev/null 2>&1 diff --git a/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/BurntSushi/toml/type_check.go new file mode 100644 index 0000000..c73f8af --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_check.go @@ -0,0 +1,91 @@ +package toml + +// tomlType represents any Go type that corresponds to a TOML type. +// While the first draft of the TOML spec has a simplistic type system that +// probably doesn't need this level of sophistication, we seem to be militating +// toward adding real composite types. +type tomlType interface { + typeString() string +} + +// typeEqual accepts any two types and returns true if they are equal. +func typeEqual(t1, t2 tomlType) bool { + if t1 == nil || t2 == nil { + return false + } + return t1.typeString() == t2.typeString() +} + +func typeIsHash(t tomlType) bool { + return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) +} + +type tomlBaseType string + +func (btype tomlBaseType) typeString() string { + return string(btype) +} + +func (btype tomlBaseType) String() string { + return btype.typeString() +} + +var ( + tomlInteger tomlBaseType = "Integer" + tomlFloat tomlBaseType = "Float" + tomlDatetime tomlBaseType = "Datetime" + tomlString tomlBaseType = "String" + tomlBool tomlBaseType = "Bool" + tomlArray tomlBaseType = "Array" + tomlHash tomlBaseType = "Hash" + tomlArrayHash tomlBaseType = "ArrayHash" +) + +// typeOfPrimitive returns a tomlType of any primitive value in TOML. +// Primitive values are: Integer, Float, Datetime, String and Bool. +// +// Passing a lexer item other than the following will cause a BUG message +// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. +func (p *parser) typeOfPrimitive(lexItem item) tomlType { + switch lexItem.typ { + case itemInteger: + return tomlInteger + case itemFloat: + return tomlFloat + case itemDatetime: + return tomlDatetime + case itemString: + return tomlString + case itemMultilineString: + return tomlString + case itemRawString: + return tomlString + case itemRawMultilineString: + return tomlString + case itemBool: + return tomlBool + } + p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) + panic("unreachable") +} + +// typeOfArray returns a tomlType for an array given a list of types of its +// values. +// +// In the current spec, if an array is homogeneous, then its type is always +// "Array". If the array is not homogeneous, an error is generated. +func (p *parser) typeOfArray(types []tomlType) tomlType { + // Empty arrays are cool. 
+ if len(types) == 0 { + return tomlArray + } + + theType := types[0] + for _, t := range types[1:] { + if !typeEqual(theType, t) { + p.panicf("Array contains values of type '%s' and '%s', but "+ + "arrays must be homogeneous.", theType, t) + } + } + return tomlArray +} diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go new file mode 100644 index 0000000..608997c --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_fields.go @@ -0,0 +1,242 @@ +package toml + +// Struct field handling is adapted from code in encoding/json: +// +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the Go distribution. + +import ( + "reflect" + "sort" + "sync" +) + +// A field represents a single field found in a struct. +type field struct { + name string // the name of the field (`toml` tag included) + tag bool // whether field has a `toml` tag + index []int // represents the depth of an anonymous field + typ reflect.Type // the type of the field +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from toml tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that TOML should recognize for the given +// type. The algorithm is breadth-first search over the set of structs to +// include - the top struct and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + opts := getOptions(sf.Tag) + if opts.skip { + continue + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. 
+ if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := opts.name != "" + name := opts.name + if name == "" { + name = sf.Name + } + fields = append(fields, field{name, tagged, index, ft}) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + f := field{name: ft.Name(), index: index, typ: ft} + next = append(next, f) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with TOML tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// TOML tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. 
+ f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} diff --git a/vendor/github.com/UnnoTed/fileb0x/.gitignore b/vendor/github.com/UnnoTed/fileb0x/.gitignore new file mode 100644 index 0000000..76eaabd --- /dev/null +++ b/vendor/github.com/UnnoTed/fileb0x/.gitignore @@ -0,0 +1,3 @@ +_example/simple/static/ +_example/echo/myEmbeddedFiles/ +fileb0x diff --git a/vendor/github.com/UnnoTed/fileb0x/CHANGELOG.md b/vendor/github.com/UnnoTed/fileb0x/CHANGELOG.md new file mode 100644 index 0000000..c24e2d9 --- /dev/null +++ b/vendor/github.com/UnnoTed/fileb0x/CHANGELOG.md @@ -0,0 +1,30 @@ +# Changelog +All notable changes to this project will be documented in this file. + +To update simply run: +```bash +go get -u github.com/UnnoTed/fileb0x +``` + +## 2018-04-17 +### Changed +- Improved file processing's speed +- Improved walk speed with [godirwalk](https://github.com/karrick/godirwalk) +- Fixed updater's progressbar + +## 2018-03-17 +### Added +- Added condition to files' template to avoid creating error variable when not required. + +## 2018-03-14 +### Removed +- [go-dry](https://github.com/ungerik/go-dry) dependency. + +## 2018-02-22 +### Added +- Avoid rewriting the main b0x file by checking a MD5 hash of the (file's modification time + cfg). +- Avoid rewriting unchanged files by comparing the Timestamp of the b0x's file and the file's modification time. +- Config option `lcf` which when enabled along with `spread` **l**ogs the list of **c**hanged **f**iles to the console. +- Message to inform that no file or cfg changes have been detecTed (not an error). +### Changed +- Config option `clean` to only remove unused b0x files instead of everything. diff --git a/vendor/github.com/UnnoTed/fileb0x/LICENSE b/vendor/github.com/UnnoTed/fileb0x/LICENSE new file mode 100644 index 0000000..3187269 --- /dev/null +++ b/vendor/github.com/UnnoTed/fileb0x/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 UnnoTed (UnnoTedx@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/UnnoTed/fileb0x/README.md b/vendor/github.com/UnnoTed/fileb0x/README.md new file mode 100644 index 0000000..4554cea --- /dev/null +++ b/vendor/github.com/UnnoTed/fileb0x/README.md @@ -0,0 +1,627 @@ +fileb0x [![Circle CI](https://circleci.com/gh/UnnoTed/fileb0x.svg?style=svg)](https://circleci.com/gh/UnnoTed/fileb0x) [![GoDoc](https://godoc.org/github.com/UnnoTed/fileb0x?status.svg)](https://godoc.org/github.com/UnnoTed/fileb0x) [![GoReportCard](https://goreportcard.com/badge/unnoted/fileb0x)](https://goreportcard.com/report/unnoted/fileb0x) +------- + +### What is fileb0x? +A better, customizable tool to embed files in Go. + +It is an alternative to `go-bindata` that has better features and a more organized configuration. + +###### TL;DR +a better `go-bindata` + +------- +### How does it compare to `go-bindata`? +Feature | fileb0x | go-bindata +--------------------- | ------------- | ------------------ +gofmt | yes (optional) | no +golint | safe | unsafe +gzip compression | yes | yes +gzip decompression | yes (optional: runtime) | yes (on read) +gzip compression levels | yes | no +separated prefix / base for each file | yes | no (all files only) +different build tags for each file | yes | no +exclude / ignore files | yes (glob) | yes (regex) +spread files | yes | no (single file only) +unexported vars/funcs | yes (optional) | no +virtual memory file system | yes | no +http file system / handler | yes | no +replace text in files | yes | no +glob support | yes | no (walk folders only) +regex support | no | yes (ignore files only) +config file | yes (config file only) | no (cmd args only) +update files remotely | yes | no + +------- +### What are the benefits of using a Virtual Memory File System? +By using a virtual memory file system you get access to files as if they were stored on a hard drive instead of in a `map[string][]byte`, so you can use IO readers and writers. +This means you can `read`, `write`, `remove`, `stat` and `rename` files, and also `make`, `remove` and `stat` directories. + +###### TL;DR +The Virtual Memory File System supports the same kind of operations that files stored on a hard drive would.
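The point above is easiest to see with a small sketch. This example is not part of the original fileb0x docs; it is a minimal illustration that assumes a generated package named `myEmbeddedFiles` (as in the echo examples below) containing the embedded file `ufo.html`, and it uses the `FS` and `CTX` variables described in the Functions and Variables section further down.

```go
package main

import (
	"io"
	"log"
	"os"

	// hypothetical generated package, as used in the echo examples below
	"github.com/UnnoTed/fileb0x/_example/echo/myEmbeddedFiles"
)

func main() {
	// open an embedded file as a real io.Reader instead of indexing a map[string][]byte
	f, err := myEmbeddedFiles.FS.OpenFile(myEmbeddedFiles.CTX, "ufo.html", os.O_RDONLY, 0644)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// ...and stream it anywhere an io.Writer is accepted
	if _, err := io.Copy(os.Stdout, f); err != nil {
		log.Fatal(err)
	}
}
```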
+ + + +### Features + +- [x] golint safe code output + +- [x] optional: gzip compression (with optional run-time decompression) + +- [x] optional: formatted code (gofmt) + +- [x] optional: spread files + +- [x] optional: unexported variables, functions and types + +- [x] optional: include multiple files and folders + +- [x] optional: exclude files and/or folders + +- [x] optional: replace text in files + +- [x] optional: custom base and prefix path + +- [x] Virtual Memory FileSystem - [webdav](https://godoc.org/golang.org/x/net/webdav) + +- [x] HTTP FileSystem and Handler + +- [x] glob support - [doublestar](https://github.com/bmatcuk/doublestar) + +- [x] json / yaml / toml support + +- [x] optional: Update files remotely + +- [x] optional: Build tags for each file + + +### License +MIT + + +### Get Started + +###### TL;DR QuickStart™ + +Here's the get-you-going in 30 seconds or less: + +```bash +git clone https://github.com/UnnoTed/fileb0x.git +cd fileb0x +cd _example/simple +go generate +go build +./simple +``` + +* `go.mod` defines the module as `example.com/foo/simple` +* `b0x.yaml` defines the sub-package `static` from the folder `public` +* `main.go` includes the comment `//go:generate go run github.com/UnnoTed/fileb0x b0x.yaml` +* `main.go` also includes the import `example.com/foo/simple/static` +* `go generate` locally installs `fileb0x`, which generates `./static` according to `b0x.yaml` +* `go build` creates the binary `simple` from `package main` in the current folder +* `./simple` runs the self-contained standalone webserver with built-in files from `public` +
+ +How to use it? + +##### 1. Download + +```bash +go get -u github.com/UnnoTed/fileb0x +``` + +##### 2. Create a config file +First you need to create a config file; it can be `*.json`, `*.yaml` or `*.toml` (`*` means any file name). + +Now write the configuration you wish into the file; you can use the example files as a starting point. + +json config file example [b0x.json](https://raw.githubusercontent.com/UnnoTed/fileb0x/master/_example/simple/b0x.json) + +yaml config file example [b0x.yaml](https://github.com/UnnoTed/fileb0x/blob/master/_example/simple/b0x.yaml) + +toml config file example [b0x.toml](https://github.com/UnnoTed/fileb0x/blob/master/_example/simple/b0x.toml) + +##### 3. Run +If you prefer to use it from the `cmd or terminal`, edit and run the command below. + +```bash +fileb0x YOUR_CONFIG_FILE.yaml +``` + +Or, if you wish to generate the embedded files through `go generate`, just add and edit the line below in your `main.go`. +```go +//go:generate fileb0x YOUR_CONFIG_FILE.yaml +``` + +A minimal `main.go` sketch that ties these pieces together is shown below. +
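As a rough sketch of how the pieces fit together (assuming a config named `b0x.yaml` that generates a sub-package `static` importable as `example.com/foo/simple/static`, as in the quick start above), a `main.go` could look like this:

```go
package main

import (
	"net/http"

	// hypothetical import path of the generated sub-package;
	// it has to match the pkg/dest values in your config file
	"example.com/foo/simple/static"
)

// regenerate the embedded files whenever `go generate` runs
//go:generate fileb0x b0x.yaml

func main() {
	// after `go generate`, the files listed in b0x.yaml are compiled in
	// and can be served straight from memory
	panic(http.ListenAndServe(":8080", http.FileServer(static.HTTP)))
}
```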
+ +
+ What functions and variables does fileb0x let me access, and what are they for? + +#### HTTP +```go +var HTTP http.FileSystem +``` + +##### Type +[`http.FileSystem`](https://golang.org/pkg/net/http/#FileSystem) + +##### What is it? + +An in-memory HTTP file system. + +##### What it does? + +Serves files through an HTTP FileServer. + +##### How to use it? +```go +// http.ListenAndServe will create a server at port 8080 +// it will take http.FileServer() as a param +// +// http.FileServer() will use HTTP as a file system so all your files +// can be available through port 8080 +http.ListenAndServe(":8080", http.FileServer(myEmbeddedFiles.HTTP)) +``` +
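If you don't want the embedded files mounted at the web root, the usual `net/http` pattern with `http.StripPrefix` applies; this is not fileb0x-specific, just a hedged example that assumes the generated package is called `myEmbeddedFiles` as above:

```go
package main

import (
	"net/http"

	// hypothetical generated package name, as in the example above
	"github.com/UnnoTed/fileb0x/_example/echo/myEmbeddedFiles"
)

func main() {
	// mount the embedded files under /assets/ instead of "/";
	// StripPrefix removes "/assets" before the lookup in the in-memory file system
	http.Handle("/assets/", http.StripPrefix("/assets", http.FileServer(myEmbeddedFiles.HTTP)))
	panic(http.ListenAndServe(":8080", nil))
}
```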
+
+ How to use it with `echo`? + +```go +package main + +import ( + "github.com/labstack/echo" + + // your embedded files import here ... + "github.com/UnnoTed/fileb0x/_example/echo/myEmbeddedFiles" +) + +func main() { + e := echo.New() + + // enable any filename to be loaded from the in-memory file system + e.GET("/*", echo.WrapHandler(myEmbeddedFiles.Handler)) + + // http://localhost:1337/public/README.md + e.Start(":1337") +} +``` + +##### How to serve a single file through `echo`? +```go +package main + +import ( + "log" + "net/http" + + "github.com/labstack/echo" + + // your embedded files import here ... + "github.com/UnnoTed/fileb0x/_example/echo/myEmbeddedFiles" +) + +func main() { + e := echo.New() + + // read ufo.html from the in-memory file system + htmlb, err := myEmbeddedFiles.ReadFile("ufo.html") + if err != nil { + log.Fatal(err) + } + + // convert to string + html := string(htmlb) + + // serve ufo.html through "/" + e.GET("/", func(c echo.Context) error { + + // serve as html + return c.HTML(http.StatusOK, html) + }) + + e.Start(":1337") +} +``` + +
+ +
+ Examples + +[simple example](https://github.com/UnnoTed/fileb0x/tree/master/_example/simple) - +[main.go](https://github.com/UnnoTed/fileb0x/blob/master/_example/simple/main.go) + +[echo example](https://github.com/UnnoTed/fileb0x/tree/master/_example/echo) - +[main.go](https://github.com/UnnoTed/fileb0x/blob/master/_example/echo/main.go) + +```go +package main + +import ( + "log" + "net/http" + + // your generated package + "github.com/UnnoTed/fileb0x/_example/simple/static" +) + +func main() { + files, err := static.WalkDirs("", false) + if err != nil { + log.Fatal(err) + } + + log.Println("ALL FILES", files) + + // here we'll read the file from the virtual file system + b, err := static.ReadFile("public/README.md") + if err != nil { + log.Fatal(err) + } + + // byte to str + s := string(b) + s += "#hello" + + // write the file back into the virtual file system + err = static.WriteFile("public/README.md", []byte(s), 0644) + if err != nil { + log.Fatal(err) + } + + log.Println(string(b)) + + // true = handler + // false = file system + as := false + + // try it -> http://localhost:1337/public/secrets.txt + if as { + // as Handler + panic(http.ListenAndServe(":1337", static.Handler)) + } else { + // as File System + panic(http.ListenAndServe(":1337", http.FileServer(static.HTTP))) + } +} +``` +
+
+ +Update files remotely + +Having to upload an entire binary just to update some files in a b0x and restart a server isn't something that I like to do... + +##### How does it work? +By enabling the updater option, the next b0x you generate will include an HTTP server. This server uses HTTP basic auth and exposes a single endpoint `/` that accepts two methods: `GET` and `POST`. + +The `GET` method responds with a list of file names and the sha256 hash of each file. +The `POST` method is used to upload files: it creates the directory tree of a new file and then creates the file, or it updates an existing file in the virtual memory file system... it responds with an `ok` string when the upload is successful. A minimal client sketch of the `GET` endpoint follows the steps below. + +##### How to update files remotely? + +1. First enable the updater option in your config file: +```yaml +################## +## yaml example ## +################## + +# updater allows you to update a b0x in a running server +# without having to restart it +updater: + # disabled by default + enabled: false + + # empty mode creates an empty b0x file with just the + # server and the filesystem, then you'll have to upload + # the files later using the cmd: + # fileb0x -update=http://server.com:port b0x.yaml + # + # it avoids long compile times + empty: false + + # number of uploads at the same time + workers: 3 + + # to get a username and password from an env variable + # leave username and password blank (username: "") + # then set your username and password in the env vars + # (no caps) -> fileb0x_username and fileb0x_password + # + # when using env vars, set it before generating a b0x + # so it can be applied to the updater server. + username: "user" # username: "" + password: "pass" # password: "" + port: 8041 +``` +2. Generate a b0x with the updater option enabled; don't forget to set the username and password for authentication. +3. When your files update, just run `fileb0x -update=http://yourServer.com:8041 b0x.toml` to update the files in the running server. +
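For reference, the sketch below shows what talking to the `GET` endpoint by hand could look like. It is only an illustration (normally you just run the `fileb0x -update=...` command); the server address, username and password are the placeholder values from the config example above, and the response shape mirrors the `ResponseInit` struct (`Success` plus a `Hashes` map of file path to sha256 hex digest) returned by the generated updater server.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

// initResponse mirrors the ResponseInit struct of the generated updater server
type initResponse struct {
	Success bool
	Hashes  map[string]string // file path -> sha256 hex digest
}

func main() {
	// the updater protects "/" with HTTP basic auth (credentials from the config file)
	req, err := http.NewRequest(http.MethodGet, "http://yourServer.com:8041/", nil)
	if err != nil {
		log.Fatal(err)
	}
	req.SetBasicAuth("user", "pass")

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()

	var ir initResponse
	if err := json.NewDecoder(res.Body).Decode(&ir); err != nil {
		log.Fatal(err)
	}

	// list every embedded file and its server-side hash
	for path, hash := range ir.Hashes {
		fmt.Println(path, hash)
	}
}
```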
+ +
+ Build Tags + +To use build tags for a b0x package, just add the tags to the `tags` property in the main object of your config file: +```yaml +# default: main +pkg: static + +# destination +dest: "./static/" + +# build tags for the main b0x.go file +tags: "!linux" +``` + +To have different build tags for a list of files, you must enable the `spread` property in the main object of your config file; then, in the `custom` list, choose the set of files for which you want a different build tag: +```yaml +# default: main +pkg: static + +# destination +dest: "./static/" + +# build tags for the main b0x.go file +tags: "windows darwin" + +# [spread] means it will make a file to hold all fileb0x data +# and each file into a separate .go file +# +# example: +# there are 2 files in the folder assets, they're: hello.json and world.txt +# when spread is activated, fileb0x will make a file: +# b0x.go or [output]'s data, assets_hello.json.go and assets_world.txt.go +# +# +# type: bool +# default: false +spread: true + +# type: array of objects +custom: + # type: array of strings + - files: + - "start_space_ship.exe" + + # build tags for this set of files + # it will only work if spread mode is enabled + tags: "windows" + + # type: array of strings + - files: + - "ufo.dmg" + + # build tags for this set of files + # it will only work if spread mode is enabled + tags: "darwin" +``` + +The config above will make: +```yaml +ab0x.go # // +build windows darwin + +b0xfile_start_space_ship.exe.go # // +build windows +b0xfile_ufo.dmg.go # // +build darwin +``` +
+ +### Functions and Variables + +
+ FS (File System) + +```go +var FS webdav.FileSystem +``` + +##### Type +[`webdav.FileSystem`](https://godoc.org/golang.org/x/net/webdav#FileSystem) + +##### What is it? + +An in-memory file system. + +##### What it does? + +Lets you `read, write, remove, stat and rename` files and `make, remove and stat` directories... + +##### How to use it? +```go +func main() { + + // you have the following functions available + // they all control files/dirs from/to the in-memory file system! + // (signatures shown for reference only) + // func Mkdir(name string, perm os.FileMode) error + // func OpenFile(name string, flag int, perm os.FileMode) (File, error) + // func RemoveAll(name string) error + // func Rename(oldName, newName string) error + // func Stat(name string) (os.FileInfo, error) + + // 1. creates a directory + err := myEmbeddedFiles.FS.Mkdir(myEmbeddedFiles.CTX, "assets", 0777) + if err != nil { + log.Fatal(err) + } + + // 2. creates a file in the directory we created before and opens it + // with fileb0x you can use ReadFile and WriteFile instead of this complicated thing + f, err := myEmbeddedFiles.FS.OpenFile(myEmbeddedFiles.CTX, "assets/memes.txt", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + log.Fatal(err) + } + + data := []byte("I are programmer I make computer beep boop beep beep boop") + + // write the data into the file + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err != nil { + log.Fatal(err) + } + + // close the file + if err1 := f.Close(); err1 != nil { + log.Fatal(err1) + } + + // 3. rename a file + // can also move files + err = myEmbeddedFiles.FS.Rename(myEmbeddedFiles.CTX, "assets/memes.txt", "assets/programmer_memes.txt") + if err != nil { + log.Fatal(err) + } + + // 4. checks if the file we renamed exists (err == nil means it does) + if _, err = myEmbeddedFiles.FS.Stat(myEmbeddedFiles.CTX, "assets/programmer_memes.txt"); err == nil { + // exists! + + // tries to remove the /assets/ directory + // from the in-memory file system + err = myEmbeddedFiles.FS.RemoveAll(myEmbeddedFiles.CTX, "assets") + if err != nil { + log.Fatal(err) + } + } + + // 5. checks if the dir we removed still exists + if _, err = myEmbeddedFiles.FS.Stat(myEmbeddedFiles.CTX, "assets"); os.IsNotExist(err) { + // doesn't exist! + log.Println("works!") + } +} +``` +
+
+ Handler + +```go +var Handler *webdav.Handler +``` + +##### Type +[`webdav.Handler`](https://godoc.org/golang.org/x/net/webdav#Handler) + +##### What is it? + +An HTTP handler implementation. + +##### What it does? + +Serves your embedded files. + +##### How to use it? +```go +// http.ListenAndServe will create an HTTP server at port 8080 +// and use Handler as an HTTP handler to serve your embedded files +http.ListenAndServe(":8080", myEmbeddedFiles.Handler) +``` +
+ +
+ ReadFile + +```go +func ReadFile(filename string) ([]byte, error) +``` + +##### Type +[`ioutil.ReadFile`](https://godoc.org/io/ioutil#ReadFile) + +##### What is it? + +A helper function to read your embedded files. + +##### What it does? + +Reads the specified file from the in-memory file system and returns it as a byte slice. + +##### How to use it? +```go +// it works the same way as ioutil.ReadFile does, +// but it will read the file from the in-memory file system +// instead of the hard disk! +// +// the file name is passwords.txt +// topSecretFile is a byte slice ([]byte) +topSecretFile, err := myEmbeddedFiles.ReadFile("passwords.txt") +if err != nil { + log.Fatal(err) +} + +log.Println(string(topSecretFile)) +``` +
+ +
+ WriteFile + +```go +func WriteFile(filename string, data []byte, perm os.FileMode) error +``` + +##### Type +[`ioutil.WriteFile`](https://godoc.org/io/ioutil#WriteFile) + +##### What is it? + +A helper function to write a file into the in-memory file system. + +##### What it does? + +Writes the `data` into the specified `filename` in the in-memory file system, meaning you embedded a file! + +-- IMPORTANT -- +IT WON'T WRITE THE FILE INTO THE GENERATED .GO FILE; THE WRITE IS TEMPORARY. THE FILE IS AVAILABLE WHILE YOUR APP IS RUNNING, +BUT AFTER IT SHUTS DOWN, IT IS GONE. + +##### How to use it? +```go +// it works the same way as ioutil.WriteFile does, +// but it will write the file into the in-memory file system +// instead of the hard disk! +// +// the file name is secret.txt +// data should be a byte slice ([]byte) +// 0644 is a unix file permission + +data := []byte("jet fuel can't melt steel beams") +err := myEmbeddedFiles.WriteFile("secret.txt", data, 0644) +if err != nil { + log.Fatal(err) +} +``` +
+ +
+ WalkDirs + +```go +func WalkDirs(name string, includeDirsInList bool, files ...string) ([]string, error) +``` + +##### Type +`([]string, error)` + +##### What is it? + +A helper function to walk the dirs of the in-memory file system. + +##### What it does? + +Returns a list of files (with an option to include dirs) that are currently in the in-memory file system. + +##### How to use it? +```go +includeDirsInTheList := false + +// WalkDirs returns a string slice with all file paths +files, err := myEmbeddedFiles.WalkDirs("", includeDirsInTheList) +if err != nil { + log.Fatal(err) +} + +log.Println("List of all my files", files) +``` + +
diff --git a/vendor/github.com/UnnoTed/fileb0x/bench.bat b/vendor/github.com/UnnoTed/fileb0x/bench.bat new file mode 100644 index 0000000..16f1ce4 --- /dev/null +++ b/vendor/github.com/UnnoTed/fileb0x/bench.bat @@ -0,0 +1 @@ +go test -bench=. -benchmem -v \ No newline at end of file diff --git a/vendor/github.com/UnnoTed/fileb0x/bench.txt b/vendor/github.com/UnnoTed/fileb0x/bench.txt new file mode 100644 index 0000000..f23ba29 --- /dev/null +++ b/vendor/github.com/UnnoTed/fileb0x/bench.txt @@ -0,0 +1,11 @@ +./_example/echo/ufo.html (1.4kb) +BenchmarkOldConvert-4 50000 37127 ns/op 31200 B/op 11 allocs/op +BenchmarkNewConvert-4 300000 5847 ns/op 12288 B/op 2 allocs/op + +gitkraken's binary (80mb) +BenchmarkOldConvert-4 1 1777277402 ns/op 1750946416 B/op 30 allocs/op +BenchmarkNewConvert-4 5 236663214 ns/op 643629056 B/op 2 allocs/op + +https://www.youtube.com/watch?v=fT4lDU-QLUY (232mb) +BenchmarkOldConvert-4 1 5089024416 ns/op 4071281120 B/op 28 allocs/op +BenchmarkNewConvert-4 2 712384868 ns/op 1856667696 B/op 2 allocs/op diff --git a/vendor/github.com/UnnoTed/fileb0x/circle.yml b/vendor/github.com/UnnoTed/fileb0x/circle.yml new file mode 100644 index 0000000..a173509 --- /dev/null +++ b/vendor/github.com/UnnoTed/fileb0x/circle.yml @@ -0,0 +1,3 @@ +test: + override: + - go test ./... -v diff --git a/vendor/github.com/UnnoTed/fileb0x/compression/gzip.go b/vendor/github.com/UnnoTed/fileb0x/compression/gzip.go new file mode 100644 index 0000000..329d142 --- /dev/null +++ b/vendor/github.com/UnnoTed/fileb0x/compression/gzip.go @@ -0,0 +1,64 @@ +package compression + +import ( + "bytes" + "compress/flate" + "compress/gzip" +) + +// Gzip compression support +type Gzip struct { + *Options +} + +// NewGzip creates a Gzip + Options variable +func NewGzip() *Gzip { + gz := new(Gzip) + gz.Options = new(Options) + return gz +} + +// Compress to gzip +func (gz *Gzip) Compress(content []byte) ([]byte, error) { + if !gz.Options.Compress { + return content, nil + } + + // method + var m int + switch gz.Options.Method { + case "NoCompression": + m = flate.NoCompression + break + case "BestSpeed": + m = flate.BestSpeed + break + case "BestCompression": + m = flate.BestCompression + break + default: + m = flate.DefaultCompression + break + } + + // compress + var b bytes.Buffer + w, err := gzip.NewWriterLevel(&b, m) + if err != nil { + return nil, err + } + + // insert content + _, err = w.Write(content) + if err != nil { + return nil, err + } + + err = w.Close() + if err != nil { + return nil, err + } + + // compressed content + return b.Bytes(), nil +} diff --git a/vendor/github.com/UnnoTed/fileb0x/compression/options.go b/vendor/github.com/UnnoTed/fileb0x/compression/options.go new file mode 100644 index 0000000..d8a48b0 --- /dev/null +++ b/vendor/github.com/UnnoTed/fileb0x/compression/options.go @@ -0,0 +1,22 @@ +package compression + +// Options for compression +type Options struct { + // activates the compression + // default: false + Compress bool + + // valid values are: + // -> "NoCompression" + // -> "BestSpeed" + // -> "BestCompression" + // -> "DefaultCompression" + // + // default: "DefaultCompression" // when: Compress == true && Method == "" + Method string + + // true = do it yourself (the file is written as gzip into the memory file system) + // false = decompress at run time (while writing file into memory file system) + // default: false + Keep bool +} diff --git a/vendor/github.com/UnnoTed/fileb0x/config/config.go b/vendor/github.com/UnnoTed/fileb0x/config/config.go new file mode 100644 
index 0000000..50113ca --- /dev/null +++ b/vendor/github.com/UnnoTed/fileb0x/config/config.go @@ -0,0 +1,75 @@ +package config + +import ( + "strings" + + "github.com/UnnoTed/fileb0x/compression" + "github.com/UnnoTed/fileb0x/custom" + "github.com/UnnoTed/fileb0x/updater" +) + +// Config holds the json/yaml/toml data +type Config struct { + Dest string + NoPrefix bool + + Pkg string + Fmt bool // gofmt + Compression *compression.Options + Tags string + + Output string + + Custom []custom.Custom + + Spread bool + Unexported bool + Clean bool + Debug bool + Updater updater.Config + Lcf bool +} + +// Defaults set the default value for some variables +func (cfg *Config) Defaults() error { + // default destination + if cfg.Dest == "" { + cfg.Dest = "/" + } + + // insert "/" at end of dest when it's not found + if !strings.HasSuffix(cfg.Dest, "/") { + cfg.Dest += "/" + } + + // default file name + if cfg.Output == "" { + cfg.Output = "b0x.go" + } + + // inserts .go at the end of file name + if !strings.HasSuffix(cfg.Output, ".go") { + cfg.Output += ".go" + } + + // inserts an A before the output file's name so it can + // run init() before b0xfile's + if !cfg.NoPrefix && !strings.HasPrefix(cfg.Output, "a") { + cfg.Output = "a" + cfg.Output + } + + // default package + if cfg.Pkg == "" { + cfg.Pkg = "main" + } + + if cfg.Compression == nil { + cfg.Compression = &compression.Options{ + Compress: false, + Method: "DefaultCompression", + Keep: false, + } + } + + return nil +} diff --git a/vendor/github.com/UnnoTed/fileb0x/config/file.go b/vendor/github.com/UnnoTed/fileb0x/config/file.go new file mode 100644 index 0000000..071c340 --- /dev/null +++ b/vendor/github.com/UnnoTed/fileb0x/config/file.go @@ -0,0 +1,115 @@ +package config + +import ( + "encoding/json" + "errors" + "io/ioutil" + "os" + "path" + + "github.com/UnnoTed/fileb0x/utils" + + "github.com/BurntSushi/toml" + "gopkg.in/yaml.v2" + "fmt" +) + +// File holds config file info +type File struct { + FilePath string + Data []byte + Mode string // "json" || "yaml" || "yml" || "toml" +} + +// FromArg gets the json/yaml/toml file from args +func (f *File) FromArg(read bool) error { + // (length - 1) + arg := os.Args[len(os.Args)-1:][0] + + // get extension + ext := path.Ext(arg) + if len(ext) > 1 { + ext = ext[1:] // remove dot + } + + // when json/yaml/toml file isn't found on last arg + // it searches for a ".json", ".yaml", ".yml" or ".toml" string in all args + if ext != "json" && ext != "yaml" && ext != "yml" && ext != "toml" { + // loop through args + for _, a := range os.Args { + // get extension + ext := path.Ext(a) + + // check for valid extensions + if ext == ".json" || ext == ".yaml" || ext == ".yml" || ext == ".toml" { + f.Mode = ext[1:] // remove dot + ext = f.Mode + arg = a + break + } + } + } else { + f.Mode = ext + } + + // check if extension is json, yaml or toml + // then get it's absolute path + if ext == "json" || ext == "yaml" || ext == "yml" || ext == "toml" { + f.FilePath = arg + + // so we can test without reading a file + if read { + if !utils.Exists(f.FilePath) { + return errors.New("Error: I Can't find the config file at [" + f.FilePath + "]") + } + } + } else { + return errors.New("Error: You must specify a json, yaml or toml file") + } + + return nil +} + +// Parse gets the config file's content from File.Data +func (f *File) Parse() (*Config, error) { + // remove comments + f.RemoveJSONComments() + + to := &Config{} + switch f.Mode { + case "json": + return to, json.Unmarshal(f.Data, to) + case "yaml", "yml": + 
return to, yaml.Unmarshal(f.Data, to) + case "toml": + return to, toml.Unmarshal(f.Data, to) + default: + return nil, fmt.Errorf("unknown mode '%s'", f.Mode) + } +} + +// Load the json/yaml file that was specified from args +// and transform it into a config struct +func (f *File) Load() (*Config, error) { + var err error + if !utils.Exists(f.FilePath) { + return nil, errors.New("Error: I Can't find the config file at [" + f.FilePath + "]") + } + + // read file + f.Data, err = ioutil.ReadFile(f.FilePath) + if err != nil { + return nil, err + } + + // parse file + return f.Parse() +} + +// RemoveJSONComments from the file +func (f *File) RemoveJSONComments() { + if f.Mode == "json" { + // remove inline comments + f.Data = []byte(regexComments.ReplaceAllString(string(f.Data), "")) + } +} diff --git a/vendor/github.com/UnnoTed/fileb0x/config/regexp.go b/vendor/github.com/UnnoTed/fileb0x/config/regexp.go new file mode 100644 index 0000000..f562460 --- /dev/null +++ b/vendor/github.com/UnnoTed/fileb0x/config/regexp.go @@ -0,0 +1,11 @@ +package config + +import "regexp" + +var ( + // used to remove comments from json + regexComments = regexp.MustCompile(`\/\/([\w\s\'].*)`) + + // SafeVarName is used to remove special chars from paths + SafeVarName = regexp.MustCompile(`[^a-zA-Z0-9]`) +) diff --git a/vendor/github.com/UnnoTed/fileb0x/custom/custom.go b/vendor/github.com/UnnoTed/fileb0x/custom/custom.go new file mode 100644 index 0000000..b4bc3f2 --- /dev/null +++ b/vendor/github.com/UnnoTed/fileb0x/custom/custom.go @@ -0,0 +1,228 @@ +package custom + +import ( + "errors" + "fmt" + "io/ioutil" + "log" + "os" + "path" + "strings" + + "github.com/UnnoTed/fileb0x/compression" + "github.com/UnnoTed/fileb0x/dir" + "github.com/UnnoTed/fileb0x/file" + "github.com/UnnoTed/fileb0x/updater" + "github.com/UnnoTed/fileb0x/utils" + "github.com/bmatcuk/doublestar" + "github.com/karrick/godirwalk" +) + +const hextable = "0123456789abcdef" + +// SharedConfig holds needed data from config package +// without causing import cycle +type SharedConfig struct { + Output string + Compression *compression.Gzip + Updater updater.Config +} + +// Custom is a set of files with dedicaTed customization +type Custom struct { + Files []string + Base string + Prefix string + Tags string + + Exclude []string + Replace []Replacer +} + +var ( + xx = []byte(`\x`) + start = []byte(`[]byte("`) +) + +const lowerhex = "0123456789abcdef" + +// Parse the files transforming them into a byte string and inserting the file +// into a map of files +func (c *Custom) Parse(files *map[string]*file.File, dirs **dir.Dir, config *SharedConfig) error { + to := *files + dirList := *dirs + + var newList []string + for _, customFile := range c.Files { + // get files from glob + list, err := doublestar.Glob(customFile) + if err != nil { + return err + } + + // insert files from glob into the new list + newList = append(newList, list...) 
+ } + + // copy new list + c.Files = newList + + // 0 files in the list + if len(c.Files) == 0 { + return errors.New("No files found") + } + + // loop through files from glob + for _, customFile := range c.Files { + // gives error when file doesn't exist + if !utils.Exists(customFile) { + return fmt.Errorf("File [%s] doesn't exist", customFile) + } + + cb := func(fpath string, d *godirwalk.Dirent) error { + if config.Updater.Empty && !config.Updater.IsUpdating { + log.Println("empty mode") + return nil + } + + // only files will be processed + if d != nil && d.IsDir() { + return nil + } + + originalPath := fpath + fpath = utils.FixPath(fpath) + + var fixedPath string + if c.Prefix != "" || c.Base != "" { + c.Base = strings.TrimPrefix(c.Base, "./") + + if strings.HasPrefix(fpath, c.Base) { + fixedPath = c.Prefix + fpath[len(c.Base):] + } else { + if c.Base != "" { + fixedPath = c.Prefix + fpath + } + } + + fixedPath = utils.FixPath(fixedPath) + } else { + fixedPath = utils.FixPath(fpath) + } + + // check for excluded files + for _, excludedFile := range c.Exclude { + m, err := doublestar.Match(c.Prefix+excludedFile, fixedPath) + if err != nil { + return err + } + + if m { + return nil + } + } + + info, err := os.Stat(fpath) + if err != nil { + return err + } + + if info.Name() == config.Output { + return nil + } + + // get file's content + content, err := ioutil.ReadFile(fpath) + if err != nil { + return err + } + + replaced := false + + // loop through replace list + for _, r := range c.Replace { + // check if path matches the pattern from property: file + matched, err := doublestar.Match(c.Prefix+r.File, fixedPath) + if err != nil { + return err + } + + if matched { + for pattern, word := range r.Replace { + content = []byte(strings.Replace(string(content), pattern, word, -1)) + replaced = true + } + } + } + + // compress the content + if config.Compression.Options != nil { + content, err = config.Compression.Compress(content) + if err != nil { + return err + } + } + + dst := make([]byte, len(content)*4) + for i := 0; i < len(content); i++ { + dst[i*4] = byte('\\') + dst[i*4+1] = byte('x') + dst[i*4+2] = hextable[content[i]>>4] + dst[i*4+3] = hextable[content[i]&0x0f] + } + + f := file.NewFile() + f.OriginalPath = originalPath + f.ReplacedText = replaced + f.Data = `[]byte("` + string(dst) + `")` + f.Name = info.Name() + f.Path = fixedPath + f.Tags = c.Tags + f.Base = c.Base + f.Prefix = c.Prefix + f.Modified = info.ModTime().String() + + if _, ok := to[fixedPath]; ok { + f.Tags = to[fixedPath].Tags + } + + // insert dir to dirlist so it can be created on b0x's init() + dirList.Insert(path.Dir(fixedPath)) + + // insert file into file list + to[fixedPath] = f + return nil + } + + customFile = utils.FixPath(customFile) + + // unlike filepath.walk, godirwalk will only walk dirs + f, err := os.Open(customFile) + if err != nil { + return err + } + + defer f.Close() + + fs, err := f.Stat() + if err != nil { + return err + } + + if fs.IsDir() { + if err := godirwalk.Walk(customFile, &godirwalk.Options{ + Unsorted: true, + Callback: cb, + }); err != nil { + return err + } + + } else { + if err := cb(customFile, nil); err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/UnnoTed/fileb0x/custom/replacer.go b/vendor/github.com/UnnoTed/fileb0x/custom/replacer.go new file mode 100644 index 0000000..8945dae --- /dev/null +++ b/vendor/github.com/UnnoTed/fileb0x/custom/replacer.go @@ -0,0 +1,7 @@ +package custom + +// Replacer strings in a file +type Replacer struct { + 
File string + Replace map[string]string +} diff --git a/vendor/github.com/UnnoTed/fileb0x/dir/dir.go b/vendor/github.com/UnnoTed/fileb0x/dir/dir.go new file mode 100644 index 0000000..8cff586 --- /dev/null +++ b/vendor/github.com/UnnoTed/fileb0x/dir/dir.go @@ -0,0 +1,70 @@ +package dir + +import "strings" + +// Dir holds directory information to insert into templates +type Dir struct { + List [][]string + Blacklist []string +} + +// Exists checks if a directory exists or not +func (d *Dir) Exists(newDir string) bool { + for _, dir := range d.Blacklist { + if dir == newDir { + return true + } + } + + return false +} + +// Parse a directory to build a list of directories to be made at b0x.go +func (d *Dir) Parse(newDir string) []string { + list := strings.Split(newDir, "/") + + var dirWalk []string + + for indx := range list { + dirList := "" + for i := -1; i < indx; i++ { + dirList += list[i+1] + "/" + } + + if !d.Exists(dirList) { + if strings.HasSuffix(dirList, "//") { + dirList = dirList[:len(dirList)-1] + } + + dirWalk = append(dirWalk, dirList) + d.Blacklist = append(d.Blacklist, dirList) + } + } + + return dirWalk +} + +// Insert a new folder to the list +func (d *Dir) Insert(newDir string) { + if !d.Exists(newDir) { + d.Blacklist = append(d.Blacklist, newDir) + d.List = append(d.List, d.Parse(newDir)) + } +} + +// Clean dupes +func (d *Dir) Clean() []string { + var cleanList []string + + for _, dirs := range d.List { + for _, dir := range dirs { + if dir == "./" || dir == "/" || dir == "." || dir == "" { + continue + } + + cleanList = append(cleanList, dir) + } + } + + return cleanList +} diff --git a/vendor/github.com/UnnoTed/fileb0x/file/file.go b/vendor/github.com/UnnoTed/fileb0x/file/file.go new file mode 100644 index 0000000..17078dc --- /dev/null +++ b/vendor/github.com/UnnoTed/fileb0x/file/file.go @@ -0,0 +1,21 @@ +package file + +// File holds file's data +type File struct { + OriginalPath string + Name string + Path string + Data string + Bytes []byte + ReplacedText bool + Tags string + Base string + Prefix string + Modified string +} + +// NewFile creates a new File +func NewFile() *File { + f := new(File) + return f +} diff --git a/vendor/github.com/UnnoTed/fileb0x/file/methods.go b/vendor/github.com/UnnoTed/fileb0x/file/methods.go new file mode 100644 index 0000000..bd4f074 --- /dev/null +++ b/vendor/github.com/UnnoTed/fileb0x/file/methods.go @@ -0,0 +1,18 @@ +// +build !windows + +package file + +// GetRemap returns a map's params with +// info required to load files directly +// from the hard drive when using prefix +// and base while debug mode is activaTed +func (f *File) GetRemap() string { + if f.Base == "" && f.Prefix == "" { + return "" + } + + return `"` + f.Path + `": { + "prefix": "` + f.Prefix + `", + "base": "` + f.Base + `", + },` +} diff --git a/vendor/github.com/UnnoTed/fileb0x/file/methods_windows.go b/vendor/github.com/UnnoTed/fileb0x/file/methods_windows.go new file mode 100644 index 0000000..aa84a57 --- /dev/null +++ b/vendor/github.com/UnnoTed/fileb0x/file/methods_windows.go @@ -0,0 +1,18 @@ +package file + +import "strings" + +// GetRemap returns a map's params with +// info required to load files directly +// from the hard drive when using prefix +// and base while debug mode is activaTed +func (f *File) GetRemap() string { + if f.Base == "" && f.Prefix == "" { + return "" + } + + return `"` + strings.Replace(f.Path, `\`, `\\`, -1) + `": { + "prefix": "` + f.Prefix + `", + "base": "` + f.Base + `", + },` +} diff --git 
a/vendor/github.com/UnnoTed/fileb0x/go.mod b/vendor/github.com/UnnoTed/fileb0x/go.mod new file mode 100644 index 0000000..3ff3e65 --- /dev/null +++ b/vendor/github.com/UnnoTed/fileb0x/go.mod @@ -0,0 +1,26 @@ +module github.com/UnnoTed/fileb0x + +require ( + github.com/BurntSushi/toml v0.3.1 + github.com/airking05/termui v2.2.0+incompatible + github.com/bmatcuk/doublestar v1.1.1 + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect + github.com/karrick/godirwalk v1.7.8 + github.com/labstack/echo v3.2.1+incompatible + github.com/labstack/gommon v0.2.7 // indirect + github.com/maruel/panicparse v1.1.1 // indirect + github.com/mattn/go-colorable v0.0.9 // indirect + github.com/mattn/go-isatty v0.0.4 // indirect + github.com/mattn/go-runewidth v0.0.3 // indirect + github.com/mitchellh/go-wordwrap v1.0.0 // indirect + github.com/nsf/termbox-go v0.0.0-20180819125858-b66b20ab708e // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/testify v1.2.2 + github.com/valyala/bytebufferpool v1.0.0 // indirect + github.com/valyala/fasttemplate v0.0.0-20170224212429-dcecefd839c4 // indirect + golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b // indirect + golang.org/x/net v0.0.0-20180921000356-2f5d2388922f + golang.org/x/sys v0.0.0-20181019160139-8e24a49d80f8 // indirect + gopkg.in/yaml.v2 v2.2.1 +) diff --git a/vendor/github.com/UnnoTed/fileb0x/go.sum b/vendor/github.com/UnnoTed/fileb0x/go.sum new file mode 100644 index 0000000..0f2f280 --- /dev/null +++ b/vendor/github.com/UnnoTed/fileb0x/go.sum @@ -0,0 +1,50 @@ +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/airking05/termui v2.2.0+incompatible h1:S3j2WJzr70u8KjUktaQ0Cmja+R0edOXChltFoQSGG8I= +github.com/airking05/termui v2.2.0+incompatible/go.mod h1:B/M5sgOwSZlvGm3TsR98s1BSzlSH4wPQzUUNwZG+uUM= +github.com/bmatcuk/doublestar v1.1.1 h1:YroD6BJCZBYx06yYFEWvUuKVWQn3vLLQAVmDmvTSaiQ= +github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/karrick/godirwalk v1.7.3 h1:UP4CfXf1LfNwXrX6vqWf1DOhuiFRn2hXsqtRAQlQOUQ= +github.com/karrick/godirwalk v1.7.3/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= +github.com/karrick/godirwalk v1.7.8 h1:VfG72pyIxgtC7+3X9CMHI0AOl4LwyRAg98WAgsvffi8= +github.com/karrick/godirwalk v1.7.8/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= +github.com/labstack/echo v3.2.1+incompatible h1:J2M7YArHx4gi8p/3fDw8tX19SXhBCoRpviyAZSN3I88= +github.com/labstack/echo v3.2.1+incompatible/go.mod h1:0INS7j/VjnFxD4E2wkz67b8cVwCLbBmJyDaka6Cmk1s= +github.com/labstack/gommon v0.2.7 h1:2qOPq/twXDrQ6ooBGrn3mrmVOC+biLlatwgIu8lbzRM= +github.com/labstack/gommon v0.2.7/go.mod h1:/tj9csK2iPSBvn+3NLM9e52usepMtrd5ilFYA+wQNJ4= +github.com/maruel/panicparse v1.1.1 h1:k62YPcEoLncEEpjMt92GtG5ugb8WL/510Ys3/h5IkRc= +github.com/maruel/panicparse v1.1.1/go.mod h1:nty42YY5QByNC5MM7q/nj938VbgPU7avs45z6NClpxI= +github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4= 
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.3 h1:a+kO+98RDGEfo6asOGMmpodZq4FNtnGP54yps8BzLR4= +github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/nsf/termbox-go v0.0.0-20180819125858-b66b20ab708e h1:fvw0uluMptljaRKSU8459cJ4bmi3qUYyMs5kzpic2fY= +github.com/nsf/termbox-go v0.0.0-20180819125858-b66b20ab708e/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ= +github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasttemplate v0.0.0-20170224212429-dcecefd839c4 h1:gKMu1Bf6QINDnvyZuTaACm9ofY+PRh+5vFz4oxBZeF8= +github.com/valyala/fasttemplate v0.0.0-20170224212429-dcecefd839c4/go.mod h1:50wTf68f99/Zt14pr046Tgt3Lp2vLyFZKzbFXTOabXw= +golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b h1:2b9XGzhjiYsYPnKXoEfL7klWZQIt8IfyRCz62gCqqlQ= +golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/net v0.0.0-20180921000356-2f5d2388922f h1:QM2QVxvDoW9PFSPp/zy9FgxJLfaWTZlS61KEPtBwacM= +golang.org/x/net v0.0.0-20180921000356-2f5d2388922f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sys v0.0.0-20181019160139-8e24a49d80f8 h1:R91KX5nmbbvEd7w370cbVzKC+EzCTGqZq63Zad5IcLM= +golang.org/x/sys v0.0.0-20181019160139-8e24a49d80f8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/UnnoTed/fileb0x/lint.bat b/vendor/github.com/UnnoTed/fileb0x/lint.bat new file mode 100644 index 0000000..b2f5b5c --- /dev/null +++ b/vendor/github.com/UnnoTed/fileb0x/lint.bat @@ -0,0 +1,11 @@ +golint ./... + +@echo off +cd .\_example\simple\ +@echo on + +call golint ./... 
+ +@echo off +cd ..\..\ +@echo on \ No newline at end of file diff --git a/vendor/github.com/UnnoTed/fileb0x/main.go b/vendor/github.com/UnnoTed/fileb0x/main.go new file mode 100644 index 0000000..7a62396 --- /dev/null +++ b/vendor/github.com/UnnoTed/fileb0x/main.go @@ -0,0 +1,383 @@ +package main + +import ( + "bufio" + "bytes" + "crypto/md5" + "flag" + "fmt" + "go/format" + "io/ioutil" + "log" + "os" + "path" + "path/filepath" + "runtime" + "sort" + "strconv" + "strings" + "time" + + "github.com/UnnoTed/fileb0x/compression" + "github.com/UnnoTed/fileb0x/config" + "github.com/UnnoTed/fileb0x/custom" + "github.com/UnnoTed/fileb0x/dir" + "github.com/UnnoTed/fileb0x/file" + "github.com/UnnoTed/fileb0x/template" + "github.com/UnnoTed/fileb0x/updater" + "github.com/UnnoTed/fileb0x/utils" + + // just to install automatically + _ "github.com/labstack/echo" + _ "golang.org/x/net/webdav" +) + +var ( + err error + cfg *config.Config + files = make(map[string]*file.File) + dirs = new(dir.Dir) + cfgPath string + + fUpdate string + startTime = time.Now() + + hashStart = []byte("// modification hash(") + hashEnd = []byte(")") + + modTimeStart = []byte("// modified(") + modTimeEnd = []byte(")") +) + +func main() { + runtime.GOMAXPROCS(runtime.NumCPU()) + + // check for updates + flag.StringVar(&fUpdate, "update", "", "-update=http(s)://host:port - default port: 8041") + flag.Parse() + var ( + update = fUpdate != "" + up *updater.Updater + ) + + // create config and try to get b0x file from args + f := new(config.File) + err = f.FromArg(true) + if err != nil { + panic(err) + } + + // load b0x file's config + cfg, err = f.Load() + if err != nil { + panic(err) + } + + err = cfg.Defaults() + if err != nil { + panic(err) + } + + cfgPath = f.FilePath + + if err := cfg.Updater.CheckInfo(); err != nil { + panic(err) + } + + cfg.Updater.IsUpdating = update + + // creates a config that can be inserTed into custom + // without causing a import cycle + sharedConfig := new(custom.SharedConfig) + sharedConfig.Output = cfg.Output + sharedConfig.Updater = cfg.Updater + sharedConfig.Compression = compression.NewGzip() + sharedConfig.Compression.Options = cfg.Compression + + // loop through b0x's [custom] objects + for _, c := range cfg.Custom { + err = c.Parse(&files, &dirs, sharedConfig) + if err != nil { + panic(err) + } + } + + // builds remap's list + var ( + remap string + modHash string + mods []string + lastHash string + ) + + for _, f := range files { + remap += f.GetRemap() + mods = append(mods, f.Modified) + } + + // sorts modification time list and create a md5 of it + sort.Strings(mods) + modHash = stringMD5Hex(strings.Join(mods, "")) + "." 
+ stringMD5Hex(string(f.Data)) + exists := fileExists(cfg.Dest + cfg.Output) + + if exists { + // gets the modification hash from the main b0x file + lastHash, err = getModification(cfg.Dest+cfg.Output, hashStart, hashEnd) + if err != nil { + panic(err) + } + } + + if !exists || lastHash != modHash { + // create files template and exec it + t := new(template.Template) + t.Set("files") + t.Variables = struct { + ConfigFile string + Now string + Pkg string + Files map[string]*file.File + Tags string + Spread bool + Remap string + DirList []string + Compression *compression.Options + Debug bool + Updater updater.Config + ModificationHash string + }{ + ConfigFile: filepath.Base(cfgPath), + Now: time.Now().String(), + Pkg: cfg.Pkg, + Files: files, + Tags: cfg.Tags, + Remap: remap, + Spread: cfg.Spread, + DirList: dirs.Clean(), + Compression: cfg.Compression, + Debug: cfg.Debug, + Updater: cfg.Updater, + ModificationHash: modHash, + } + + tmpl, err := t.Exec() + if err != nil { + panic(err) + } + + if err := os.MkdirAll(cfg.Dest, 0770); err != nil { + panic(err) + } + + // gofmt + if cfg.Fmt { + tmpl, err = format.Source(tmpl) + if err != nil { + panic(err) + } + } + + // write final execuTed template into the destination file + err = ioutil.WriteFile(cfg.Dest+cfg.Output, tmpl, 0640) + if err != nil { + panic(err) + } + } + + // write spread files + var ( + finalList []string + changedList []string + ) + if cfg.Spread { + a := strings.Split(path.Dir(cfg.Dest), "/") + dirName := a[len(a)-1:][0] + + for _, f := range files { + a := strings.Split(path.Dir(f.Path), "/") + fileDirName := a[len(a)-1:][0] + + if dirName == fileDirName { + continue + } + + // transform / to _ and some other chars... + customName := "b0xfile_" + utils.FixName(f.Path) + ".go" + finalList = append(finalList, customName) + + exists := fileExists(cfg.Dest + customName) + var mth string + if exists { + mth, err = getModification(cfg.Dest+customName, modTimeStart, modTimeEnd) + if err != nil { + panic(err) + } + } + + changed := mth != f.Modified + if changed { + changedList = append(changedList, f.OriginalPath) + } + + if !exists || changed { + // creates file template and exec it + t := new(template.Template) + t.Set("file") + t.Variables = struct { + ConfigFile string + Now string + Pkg string + Path string + Name string + Dir [][]string + Tags string + Data string + Compression *compression.Options + Modified string + OriginalPath string + }{ + ConfigFile: filepath.Base(cfgPath), + Now: time.Now().String(), + Pkg: cfg.Pkg, + Path: f.Path, + Name: f.Name, + Dir: dirs.List, + Tags: f.Tags, + Data: f.Data, + Compression: cfg.Compression, + Modified: f.Modified, + OriginalPath: f.OriginalPath, + } + tmpl, err := t.Exec() + if err != nil { + panic(err) + } + + // gofmt + if cfg.Fmt { + tmpl, err = format.Source(tmpl) + if err != nil { + panic(err) + } + } + + // write final execuTed template into the destination file + if err := ioutil.WriteFile(cfg.Dest+customName, tmpl, 0640); err != nil { + panic(err) + } + } + } + } + + // remove b0xfiles when [clean] is true + // it doesn't clean destination's folders + if cfg.Clean { + matches, err := filepath.Glob(cfg.Dest + "b0xfile_*.go") + if err != nil { + panic(err) + } + + // remove matched file if they aren't in the finalList + // which contains the list of all files written by the + // spread option + for _, f := range matches { + var found bool + for _, name := range finalList { + if strings.HasSuffix(f, name) { + found = true + } + } + + if !found { + err = os.Remove(f) + if 
err != nil { + panic(err) + } + } + } + } + + // main b0x + if lastHash != modHash { + log.Printf("fileb0x: took [%dms] to write [%s] from config file [%s] at [%s]", + time.Since(startTime).Nanoseconds()/1e6, cfg.Dest+cfg.Output, + filepath.Base(cfgPath), time.Now().String()) + } else { + log.Printf("fileb0x: no changes detected") + } + + // log changed files + if cfg.Lcf && len(changedList) > 0 { + log.Printf("fileb0x: list of changed files [%s]", strings.Join(changedList, " | ")) + } + + if update { + if !cfg.Updater.Enabled { + panic("fileb0x: The updater is disabled, enable it in your config file!") + } + + // includes port when not present + if !strings.HasSuffix(fUpdate, ":"+strconv.Itoa(cfg.Updater.Port)) { + fUpdate += ":" + strconv.Itoa(cfg.Updater.Port) + } + + up = &updater.Updater{ + Server: fUpdate, + Auth: updater.Auth{ + Username: cfg.Updater.Username, + Password: cfg.Updater.Password, + }, + Workers: cfg.Updater.Workers, + } + + // get file hashes from server + if err := up.Init(); err != nil { + panic(err) + } + + // check if an update is available, then updates... + if err := up.UpdateFiles(files); err != nil { + panic(err) + } + } +} + +func getModification(path string, start []byte, end []byte) (string, error) { + file, err := os.Open(path) + if err != nil { + return "", err + } + defer file.Close() + + reader := bufio.NewReader(file) + var data []byte + for { + line, _, err := reader.ReadLine() + if err != nil { + return "", err + } + + if !bytes.HasPrefix(line, start) || !bytes.HasSuffix(line, end) { + continue + } + + data = line + break + } + + hash := bytes.TrimPrefix(data, start) + hash = bytes.TrimSuffix(hash, end) + + return string(hash), nil +} + +func fileExists(filename string) bool { + _, err := os.Stat(filename) + return err == nil +} + +func stringMD5Hex(data string) string { + hash := md5.New() + hash.Write([]byte(data)) + return fmt.Sprintf("%x", hash.Sum(nil)) +} diff --git a/vendor/github.com/UnnoTed/fileb0x/run b/vendor/github.com/UnnoTed/fileb0x/run new file mode 100644 index 0000000..e21ae38 --- /dev/null +++ b/vendor/github.com/UnnoTed/fileb0x/run @@ -0,0 +1,7 @@ +#!/bin/bash + +go install +cd ./_example/simple/ + +./run +cd ../../ diff --git a/vendor/github.com/UnnoTed/fileb0x/run.bat b/vendor/github.com/UnnoTed/fileb0x/run.bat new file mode 100644 index 0000000..0f8c61c --- /dev/null +++ b/vendor/github.com/UnnoTed/fileb0x/run.bat @@ -0,0 +1,11 @@ +go install + +@echo off +cd .\_example\simple\ +@echo on + +call b0x.bat + +@echo off +cd ..\..\ +@echo on \ No newline at end of file diff --git a/vendor/github.com/UnnoTed/fileb0x/template/file.go b/vendor/github.com/UnnoTed/fileb0x/template/file.go new file mode 100644 index 0000000..d11ae95 --- /dev/null +++ b/vendor/github.com/UnnoTed/fileb0x/template/file.go @@ -0,0 +1,64 @@ +package template + +var fileTemplate = `{{buildTags .Tags}}// Code generaTed by fileb0x at "{{.Now}}" from config file "{{.ConfigFile}}" DO NOT EDIT. 
+// modified({{.Modified}}) +// original path: {{.OriginalPath}} + +package {{.Pkg}} + +import ( + {{if .Compression.Compress}} + {{if not .Compression.Keep}} + "bytes" + "compress/gzip" + "io" + {{end}} + {{end}} + "os" +) + +// {{exportedTitle "File"}}{{buildSafeVarName .Path}} is "{{.Path}}" +var {{exportedTitle "File"}}{{buildSafeVarName .Path}} = {{.Data}} + +func init() { + {{if .Compression.Compress}} + {{if not .Compression.Keep}} + rb := bytes.NewReader({{exportedTitle "File"}}{{buildSafeVarName .Path}}) + r, err := gzip.NewReader(rb) + if err != nil { + panic(err) + } + + err = r.Close() + if err != nil { + panic(err) + } + {{end}} + {{end}} + + f, err := {{exported "FS"}}.OpenFile({{exported "CTX"}}, "{{.Path}}", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0777) + if err != nil { + panic(err) + } + + {{if .Compression.Compress}} + {{if not .Compression.Keep}} + _, err = io.Copy(f, r) + if err != nil { + panic(err) + } + {{end}} + {{else}} + _, err = f.Write({{exportedTitle "File"}}{{buildSafeVarName .Path}}) + if err != nil { + panic(err) + } + {{end}} + + err = f.Close() + if err != nil { + panic(err) + } +} + +` diff --git a/vendor/github.com/UnnoTed/fileb0x/template/files.go b/vendor/github.com/UnnoTed/fileb0x/template/files.go new file mode 100644 index 0000000..f98f0aa --- /dev/null +++ b/vendor/github.com/UnnoTed/fileb0x/template/files.go @@ -0,0 +1,411 @@ +package template + +var filesTemplate = `{{buildTags .Tags}}// Code generated by fileb0x at "{{.Now}}" from config file "{{.ConfigFile}}" DO NOT EDIT. +// modification hash({{.ModificationHash}}) + +package {{.Pkg}} +{{$Compression := .Compression}} + +import ( + "bytes" + {{if not .Spread}}{{if and $Compression.Compress (not .Debug)}}{{if not $Compression.Keep}}"compress/gzip"{{end}}{{end}}{{end}} + "context" + "io" + "net/http" + "os" + "path" +{{if or .Updater.Enabled .Debug}} + "strings" +{{end}} + + "golang.org/x/net/webdav" + +{{if .Updater.Enabled}} + "crypto/sha256" + "encoding/hex" + "log" + "path/filepath" + + "github.com/labstack/echo" + "github.com/labstack/echo/middleware" +{{end}} +) + +var ( + // CTX is a context for webdav vfs + {{exported "CTX"}} = context.Background() + + {{if .Debug}} + {{exported "FS"}} = webdav.Dir(".") + {{else}} + // FS is a virtual memory file system + {{exported "FS"}} = webdav.NewMemFS() + {{end}} + + // Handler is used to server files through a http handler + {{exportedTitle "Handler"}} *webdav.Handler + + // HTTP is the http file system + {{exportedTitle "HTTP"}} http.FileSystem = new({{exported "HTTPFS"}}) +) + +// HTTPFS implements http.FileSystem +type {{exported "HTTPFS"}} struct { + // Prefix allows to limit the path of all requests. F.e. 
a prefix "css" would allow only calls to /css/* + Prefix string +} + +{{if (and (not .Spread) (not .Debug))}} +{{range .Files}} +// {{exportedTitle "File"}}{{buildSafeVarName .Path}} is "{{.Path}}" +var {{exportedTitle "File"}}{{buildSafeVarName .Path}} = {{.Data}} +{{end}} +{{end}} + +func init() { + err := {{exported "CTX"}}.Err() + if err != nil { + panic(err) + } + +{{ $length := len .DirList }} +{{ $fLength := len .Files }} +{{ $noDirsButFiles := (and (not .Spread) (eq $length 0) (gt $fLength 0)) }} +{{if not .Debug}} +{{range $index, $dir := .DirList}} + {{if and (ne $dir "./") (ne $dir "/") (ne $dir ".") (ne $dir "")}} + err = {{exported "FS"}}.Mkdir({{exported "CTX"}}, "{{$dir}}", 0777) + if err != nil && err != os.ErrExist { + panic(err) + } + {{end}} +{{end}} +{{end}} + +{{if (and (not .Spread) (not .Debug))}} + {{if not .Updater.Empty}} + var f webdav.File + {{end}} + + {{if $Compression.Compress}} + {{if not $Compression.Keep}} + var rb *bytes.Reader + var r *gzip.Reader + {{end}} + {{end}} + + {{range .Files}} + {{if $Compression.Compress}} + {{if not $Compression.Keep}} + rb = bytes.NewReader({{exportedTitle "File"}}{{buildSafeVarName .Path}}) + r, err = gzip.NewReader(rb) + if err != nil { + panic(err) + } + + err = r.Close() + if err != nil { + panic(err) + } + {{end}} + {{end}} + + f, err = {{exported "FS"}}.OpenFile({{exported "CTX"}}, "{{.Path}}", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0777) + if err != nil { + panic(err) + } + + {{if $Compression.Compress}} + {{if not $Compression.Keep}} + _, err = io.Copy(f, r) + if err != nil { + panic(err) + } + {{end}} + {{else}} + _, err = f.Write({{exportedTitle "File"}}{{buildSafeVarName .Path}}) + if err != nil { + panic(err) + } + {{end}} + + err = f.Close() + if err != nil { + panic(err) + } + {{end}} +{{end}} + + {{exportedTitle "Handler"}} = &webdav.Handler{ + FileSystem: FS, + LockSystem: webdav.NewMemLS(), + } + +{{if .Updater.Enabled}} + go func() { + svr := &{{exportedTitle "Server"}}{} + svr.Init() + }() +{{end}} +} + +{{if .Debug}} +var remap = map[string]map[string]string{ + {{.Remap}} +} +{{end}} + +// Open a file +func (hfs *{{exported "HTTPFS"}}) Open(path string) (http.File, error) { + path = hfs.Prefix + path + +{{if .Debug}} + path = strings.TrimPrefix(path, "/") + + for current, f := range remap { + if path == current { + path = f["base"] + strings.TrimPrefix(path, f["prefix"]) + break + } + } + +{{end}} + f, err := {{if .Debug}}os{{else}}{{exported "FS"}}{{end}}.OpenFile({{if not .Debug}}{{exported "CTX"}}, {{end}}path, os.O_RDONLY, 0644) + if err != nil { + return nil, err + } + + return f, nil +} + +// ReadFile is adapTed from ioutil +func {{exportedTitle "ReadFile"}}(path string) ([]byte, error) { + f, err := {{if .Debug}}os{{else}}{{exported "FS"}}{{end}}.OpenFile({{if not .Debug}}{{exported "CTX"}}, {{end}}path, os.O_RDONLY, 0644) + if err != nil { + return nil, err + } + + buf := bytes.NewBuffer(make([]byte, 0, bytes.MinRead)) + + // If the buffer overflows, we will get bytes.ErrTooLarge. + // Return that as an error. Any other panic remains. 
+ defer func() { + e := recover() + if e == nil { + return + } + if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge { + err = panicErr + } else { + panic(e) + } + }() + _, err = buf.ReadFrom(f) + return buf.Bytes(), err +} + +// WriteFile is adapTed from ioutil +func {{exportedTitle "WriteFile"}}(filename string, data []byte, perm os.FileMode) error { + f, err := {{exported "FS"}}.OpenFile({{exported "CTX"}}, filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +// WalkDirs looks for files in the given dir and returns a list of files in it +// usage for all files in the b0x: WalkDirs("", false) +func {{exportedTitle "WalkDirs"}}(name string, includeDirsInList bool, files ...string) ([]string, error) { + f, err := {{exported "FS"}}.OpenFile({{exported "CTX"}}, name, os.O_RDONLY, 0) + if err != nil { + return nil, err + } + + fileInfos, err := f.Readdir(0) + if err != nil { + return nil, err + } + + err = f.Close() + if err != nil { + return nil, err + } + + for _, info := range fileInfos { + filename := path.Join(name, info.Name()) + + if includeDirsInList || !info.IsDir() { + files = append(files, filename) + } + + if info.IsDir() { + files, err = {{exportedTitle "WalkDirs"}}(filename, includeDirsInList, files...) + if err != nil { + return nil, err + } + } + } + + return files, nil +} + +{{if .Updater.Enabled}} +// Auth holds information for a http basic auth +type {{exportedTitle "Auth"}} struct { + Username string + Password string +} + +// ResponseInit holds a list of hashes from the server +// to be sent to the client so it can check if there +// is a new file or a changed file +type {{exportedTitle "ResponseInit"}} struct { + Success bool + Hashes map[string]string +} + +// Server holds information about the http server +// used to update files remotely +type {{exportedTitle "Server"}} struct { + Auth {{exportedTitle "Auth"}} + Files []string +} + +// Init sets the routes and basic http auth +// before starting the http server +func (s *{{exportedTitle "Server"}}) Init() { + s.Auth = {{exportedTitle "Auth"}}{ + Username: "{{.Updater.Username}}", + Password: "{{.Updater.Password}}", + } + + e := echo.New() + e.Use(middleware.Recover()) + e.Use(s.BasicAuth()) + e.POST("/", s.Post) + e.GET("/", s.Get) + + log.Println("fileb0x updater server is running at port 0.0.0.0:{{.Updater.Port}}") + if err := e.Start(":{{.Updater.Port}}"); err != nil { + panic(err) + } +} + +// Get gives a list of file names and hashes +func (s *{{exportedTitle "Server"}}) Get(c echo.Context) error { + log.Println("[fileb0x.Server]: Hashing server files...") + + // file:hash + hashes := map[string]string{} + + // get all files in the virtual memory file system + var err error + s.Files, err = {{exportedTitle "WalkDirs"}}("", false) + if err != nil { + return err + } + + // get a hash for each file + for _, filePath := range s.Files { + f, err := FS.OpenFile(CTX, filePath, os.O_RDONLY, 0644) + if err != nil { + return err + } + + hash := sha256.New() + _, err = io.Copy(hash, f) + if err != nil { + return err + } + + hashes[filePath] = hex.EncodeToString(hash.Sum(nil)) + } + + log.Println("[fileb0x.Server]: Done hashing files") + return c.JSON(http.StatusOK, &ResponseInit{ + Success: true, + Hashes: hashes, + }) +} + +// Post is used to upload a file and replace +// it in the virtual memory file system +func 
(s *{{exportedTitle "Server"}}) Post(c echo.Context) error { + file, err := c.FormFile("file") + if err != nil { + return err + } + + log.Println("[fileb0x.Server]:", file.Filename, "Found request to upload a file") + + src, err := file.Open() + if err != nil { + return err + } + defer src.Close() + + + newDir := filepath.Dir(file.Filename) + _, err = {{exported "FS"}}.Stat({{exported "CTX"}}, newDir) + if err != nil && strings.HasSuffix(err.Error(), os.ErrNotExist.Error()) { + log.Println("[fileb0x.Server]: Creating dir tree", newDir) + list := strings.Split(newDir, "/") + var tree string + + for _, dir := range list { + if dir == "" || dir == "." || dir == "/" || dir == "./" { + continue + } + + tree += dir + "/" + err = {{exported "FS"}}.Mkdir({{exported "CTX"}}, tree, 0777) + if err != nil && err != os.ErrExist { + log.Println("failed", err) + return err + } + } + } + + log.Println("[fileb0x.Server]:", file.Filename, "Opening file...") + f, err := {{exported "FS"}}.OpenFile({{exported "CTX"}}, file.Filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0777) + if err != nil && !strings.HasSuffix(err.Error(), os.ErrNotExist.Error()) { + return err + } + + log.Println("[fileb0x.Server]:", file.Filename, "Writing file into Virutal Memory FileSystem...") + if _, err = io.Copy(f, src); err != nil { + return err + } + + if err = f.Close(); err != nil { + return err + } + + log.Println("[fileb0x.Server]:", file.Filename, "Done writing file") + return c.String(http.StatusOK, "ok") +} + +// BasicAuth is a middleware to check if +// the username and password are valid +// echo's middleware isn't used because of golint issues +func (s *{{exportedTitle "Server"}}) BasicAuth() echo.MiddlewareFunc { + return func(next echo.HandlerFunc) echo.HandlerFunc { + return func(c echo.Context) error { + u, p, _ := c.Request().BasicAuth() + if u != s.Auth.Username || p != s.Auth.Password { + return echo.ErrUnauthorized + } + + return next(c) + } + } +} +{{end}} +` diff --git a/vendor/github.com/UnnoTed/fileb0x/template/funcs.go b/vendor/github.com/UnnoTed/fileb0x/template/funcs.go new file mode 100644 index 0000000..1da9a2c --- /dev/null +++ b/vendor/github.com/UnnoTed/fileb0x/template/funcs.go @@ -0,0 +1,130 @@ +package template + +import ( + "regexp" + "strconv" + "strings" + "text/template" + + "github.com/UnnoTed/fileb0x/config" +) + +var safeNameBlacklist = map[string]string{} +var blacklist = map[string]int{} + +// taken from golint @ https://github.com/golang/lint/blob/master/lint.go#L702 +var commonInitialisms = map[string]bool{ + "API": true, + "ASCII": true, + "CPU": true, + "CSS": true, + "DNS": true, + "EOF": true, + "GUID": true, + "HTML": true, + "HTTP": true, + "HTTPS": true, + "ID": true, + "IP": true, + "JSON": true, + "LHS": true, + "QPS": true, + "RAM": true, + "RHS": true, + "RPC": true, + "SLA": true, + "SMTP": true, + "SQL": true, + "SSH": true, + "TCP": true, + "TLS": true, + "TTL": true, + "UDP": true, + "UI": true, + "UID": true, + "UUID": true, + "URI": true, + "URL": true, + "UTF8": true, + "VM": true, + "XML": true, + "XSRF": true, + "XSS": true, +} + +var r = regexp.MustCompile(`[^a-zA-Z0-9]`) + +var funcsTemplate = template.FuncMap{ + "exported": exported, + "buildTags": buildTags, + "exportedTitle": exportedTitle, + "buildSafeVarName": buildSafeVarName, +} + +var unexported bool + +// SetUnexported variables, functions and types +func SetUnexported(e bool) { + unexported = e +} + +func exported(field string) string { + if !unexported { + return strings.ToUpper(field) + } + + return 
strings.ToLower(field) +} + +func exportedTitle(field string) string { + if !unexported { + return strings.Title(field) + } + + return strings.ToLower(field[0:1]) + field[1:] +} + +func buildSafeVarName(path string) string { + name, exists := safeNameBlacklist[path] + if exists { + return name + } + + n := config.SafeVarName.ReplaceAllString(path, "$") + words := strings.Split(n, "$") + + name = "" + // check for uppercase words + for _, word := range words { + upper := strings.ToUpper(word) + + if commonInitialisms[upper] { + name += upper + } else { + name += strings.Title(word) + } + } + + // avoid redeclaring variables + // + // _file.txt + // file.txt + _, blacklisted := blacklist[name] + + if blacklisted { + blacklist[name]++ + name += strconv.Itoa(blacklist[name]) + } + + safeNameBlacklist[path] = name + blacklist[name]++ + return name +} + +func buildTags(tags string) string { + if tags != "" { + tags = "// +build " + tags + "\n" + } + + return tags +} diff --git a/vendor/github.com/UnnoTed/fileb0x/template/template.go b/vendor/github.com/UnnoTed/fileb0x/template/template.go new file mode 100644 index 0000000..20b3768 --- /dev/null +++ b/vendor/github.com/UnnoTed/fileb0x/template/template.go @@ -0,0 +1,49 @@ +package template + +import ( + "bytes" + "errors" + "text/template" +) + +// Template holds b0x and file template +type Template struct { + template string + + name string + Variables interface{} +} + +// Set the template to be used +// "files" or "file" +func (t *Template) Set(name string) error { + t.name = name + if name != "files" && name != "file" { + return errors.New(`Error: Template must be "files" or "file"`) + } + + if name == "files" { + t.template = filesTemplate + } else if name == "file" { + t.template = fileTemplate + } + + return nil +} + +// Exec the template and return the final data as byte array +func (t *Template) Exec() ([]byte, error) { + tmpl, err := template.New(t.name).Funcs(funcsTemplate).Parse(t.template) + if err != nil { + return nil, err + } + + // exec template + buff := bytes.NewBufferString("") + err = tmpl.Execute(buff, t.Variables) + if err != nil { + return nil, err + } + + return buff.Bytes(), nil +} diff --git a/vendor/github.com/UnnoTed/fileb0x/test.bat b/vendor/github.com/UnnoTed/fileb0x/test.bat new file mode 100644 index 0000000..5ebac88 --- /dev/null +++ b/vendor/github.com/UnnoTed/fileb0x/test.bat @@ -0,0 +1 @@ +go test ./... 
-v \ No newline at end of file diff --git a/vendor/github.com/UnnoTed/fileb0x/updater/config.go b/vendor/github.com/UnnoTed/fileb0x/updater/config.go new file mode 100644 index 0000000..deb882f --- /dev/null +++ b/vendor/github.com/UnnoTed/fileb0x/updater/config.go @@ -0,0 +1,40 @@ +package updater + +import ( + "errors" + "os" +) + +type Config struct { + IsUpdating bool + Username string + Password string + Enabled bool + Workers int + Empty bool + Port int +} + +func (u Config) CheckInfo() error { + if !u.Enabled { + return nil + } + + if u.Username == "{FROM_ENV}" || u.Username == "" { + u.Username = os.Getenv("fileb0x_username") + } + + if u.Password == "{FROM_ENV}" || u.Password == "" { + u.Password = os.Getenv("fileb0x_password") + } + + // check for empty username and password + if u.Username == "" { + return errors.New("fileb0x: You must provide an username in the config file or through an env var: fileb0x_username") + + } else if u.Password == "" { + return errors.New("fileb0x: You must provide an password in the config file or through an env var: fileb0x_password") + } + + return nil +} diff --git a/vendor/github.com/UnnoTed/fileb0x/updater/updater.go b/vendor/github.com/UnnoTed/fileb0x/updater/updater.go new file mode 100644 index 0000000..966dc5d --- /dev/null +++ b/vendor/github.com/UnnoTed/fileb0x/updater/updater.go @@ -0,0 +1,366 @@ +package updater + +import ( + "bytes" + "crypto/sha256" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "mime/multipart" + "net/http" + "os" + "strings" + + "encoding/hex" + + "encoding/json" + + "github.com/UnnoTed/fileb0x/file" + "github.com/airking05/termui" +) + +// Auth holds authentication for the http basic auth +type Auth struct { + Username string + Password string +} + +// ResponseInit holds a list of hashes from the server +// to be sent to the client so it can check if there +// is a new file or a changed file +type ResponseInit struct { + Success bool + Hashes map[string]string +} + +// ProgressReader implements a io.Reader with a Read +// function that lets a callback report how much +// of the file was read +type ProgressReader struct { + io.Reader + Reporter func(r int64) +} + +func (pr *ProgressReader) Read(p []byte) (n int, err error) { + n, err = pr.Reader.Read(p) + pr.Reporter(int64(n)) + return +} + +// Updater sends files that should be update to the b0x server +type Updater struct { + Server string + Auth Auth + ui []termui.Bufferer + + RemoteHashes map[string]string + LocalHashes map[string]string + ToUpdate []string + Workers int +} + +// Init gets the list of file hash from the server +func (up *Updater) Init() error { + return up.Get() +} + +// Get gets the list of file hash from the server +func (up *Updater) Get() error { + log.Println("Creating hash list request...") + req, err := http.NewRequest("GET", up.Server, nil) + if err != nil { + return err + } + + req.SetBasicAuth(up.Auth.Username, up.Auth.Password) + + log.Println("Sending hash list request...") + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + return err + } + + if resp.StatusCode == http.StatusUnauthorized { + return errors.New("Error Unautorized") + } + + log.Println("Reading hash list response's body...") + var buf bytes.Buffer + _, err = buf.ReadFrom(resp.Body) + if err != nil { + return err + } + + log.Println("Parsing hash list response's body...") + ri := &ResponseInit{} + err = json.Unmarshal(buf.Bytes(), &ri) + if err != nil { + log.Println("Body is", buf.Bytes()) + return err + } + resp.Body.Close() + + // copy 
hash list + if ri.Success { + log.Println("Copying hash list...") + up.RemoteHashes = ri.Hashes + up.LocalHashes = map[string]string{} + log.Println("Done") + } + + return nil +} + +// Updatable checks if there is any file that should be updaTed +func (up *Updater) Updatable(files map[string]*file.File) (bool, error) { + hasUpdates := !up.EqualHashes(files) + + if hasUpdates { + log.Println("----------------------------------------") + log.Println("-- Found files that should be updated --") + log.Println("----------------------------------------") + } else { + log.Println("-----------------------") + log.Println("-- Nothing to update --") + log.Println("-----------------------") + } + + return hasUpdates, nil +} + +// EqualHash checks if a local file hash equals a remote file hash +// it returns false when a remote file hash isn't found (new files) +func (up *Updater) EqualHash(name string) bool { + hash, existsLocally := up.LocalHashes[name] + _, existsRemotely := up.RemoteHashes[name] + if !existsRemotely || !existsLocally || hash != up.RemoteHashes[name] { + if hash != up.RemoteHashes[name] { + log.Println("Found changes in file: ", name) + + } else if !existsRemotely && existsLocally { + log.Println("Found new file: ", name) + } + + return false + } + + return true +} + +// EqualHashes builds the list of local hashes before +// checking if there is any that should be updated +func (up *Updater) EqualHashes(files map[string]*file.File) bool { + for _, f := range files { + log.Println("Checking file for changes:", f.Path) + + if len(f.Bytes) == 0 && !f.ReplacedText { + data, err := ioutil.ReadFile(f.OriginalPath) + if err != nil { + panic(err) + } + + f.Bytes = data + + // removes the []byte("") from the string + // when the data isn't in the Bytes variable + } else if len(f.Bytes) == 0 && f.ReplacedText && len(f.Data) > 0 { + f.Data = strings.TrimPrefix(f.Data, `[]byte("`) + f.Data = strings.TrimSuffix(f.Data, `")`) + f.Data = strings.Replace(f.Data, "\\x", "", -1) + + var err error + f.Bytes, err = hex.DecodeString(f.Data) + if err != nil { + log.Println("SHIT", err) + return false + } + + f.Data = "" + } + + sha := sha256.New() + if _, err := sha.Write(f.Bytes); err != nil { + panic(err) + return false + } + + up.LocalHashes[f.Path] = hex.EncodeToString(sha.Sum(nil)) + } + + // check if there is any file to update + update := false + for k := range up.LocalHashes { + if !up.EqualHash(k) { + up.ToUpdate = append(up.ToUpdate, k) + update = true + } + } + + return !update +} + +type job struct { + current int + files *file.File + total int +} + +// UpdateFiles sends all files that should be updated to the server +// the limit is 3 concurrent files at once +func (up *Updater) UpdateFiles(files map[string]*file.File) error { + updatable, err := up.Updatable(files) + if err != nil { + return err + } + + if !updatable { + return nil + } + + // everything's height + height := 3 + err = termui.Init() + if err != nil { + panic(err) + } + defer termui.Close() + + // info text + p := termui.NewPar("PRESS ANY KEY TO QUIT") + p.Height = height + p.Width = 50 + p.TextFgColor = termui.ColorWhite + up.ui = append(up.ui, p) + + doneTotal := 0 + total := len(up.ToUpdate) + jobs := make(chan *job, total) + done := make(chan bool, total) + + if up.Workers <= 0 { + up.Workers = 1 + } + + // just so it can listen to events + go func() { + termui.Loop() + }() + + // cancel with any key + termui.Handle("/sys/kbd", func(termui.Event) { + termui.StopLoop() + os.Exit(1) + }) + + // stops rendering when total is 
reached + go func(upp *Updater, d *int) { + for { + if *d >= total { + break + } + + termui.Render(upp.ui...) + } + }(up, &doneTotal) + + for i := 0; i < up.Workers; i++ { + // creates a progress bar + g := termui.NewGauge() + g.Width = termui.TermWidth() + g.Height = height + g.BarColor = termui.ColorBlue + g.Y = len(up.ui) * height + up.ui = append(up.ui, g) + + go up.worker(jobs, done, g) + } + + for i, name := range up.ToUpdate { + jobs <- &job{ + current: i + 1, + files: files[name], + total: total, + } + } + close(jobs) + + for i := 0; i < total; i++ { + <-done + doneTotal++ + } + + return nil +} + +func (up *Updater) worker(jobs <-chan *job, done chan<- bool, g *termui.Gauge) { + for job := range jobs { + f := job.files + fr := bytes.NewReader(f.Bytes) + g.BorderLabel = fmt.Sprintf("%d/%d %s", job.current, job.total, f.Path) + + // updates progress bar's percentage + var total int64 + pr := &ProgressReader{fr, func(r int64) { + total += r + g.Percent = int(float64(total) / float64(fr.Size()) * 100) + }} + + r, w := io.Pipe() + writer := multipart.NewWriter(w) + + // copy the file into the form + go func(fr *ProgressReader) { + defer w.Close() + part, err := writer.CreateFormFile("file", f.Path) + if err != nil { + panic(err) + } + + _, err = io.Copy(part, fr) + if err != nil { + panic(err) + } + + err = writer.Close() + if err != nil { + panic(err) + } + }(pr) + + // create a post request with basic auth + // and the file included in a form + req, err := http.NewRequest("POST", up.Server, r) + if err != nil { + panic(err) + } + + req.Header.Set("Content-Type", writer.FormDataContentType()) + req.SetBasicAuth(up.Auth.Username, up.Auth.Password) + + // sends the request + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + panic(err) + } + + body := &bytes.Buffer{} + _, err = body.ReadFrom(resp.Body) + if err != nil { + panic(err) + } + + if err := resp.Body.Close(); err != nil { + panic(err) + } + + if body.String() != "ok" { + panic(body.String()) + } + + done <- true + } +} diff --git a/vendor/github.com/UnnoTed/fileb0x/utils/utils.go b/vendor/github.com/UnnoTed/fileb0x/utils/utils.go new file mode 100644 index 0000000..fd060f7 --- /dev/null +++ b/vendor/github.com/UnnoTed/fileb0x/utils/utils.go @@ -0,0 +1,35 @@ +package utils + +import ( + "os" + "path/filepath" + "strings" +) + +// FixPath converts \ and \\ to / +func FixPath(path string) string { + a := filepath.Clean(path) + b := strings.Replace(a, `\`, "/", -1) + c := strings.Replace(b, `\\`, "/", -1) + return c +} + +// FixName converts [/ to _](1), [ to -](2) and [, to __](3) +func FixName(path string) string { + a := FixPath(path) + b := strings.Replace(a, "/", "_", -1) // / to _ + c := strings.Replace(b, " ", "-", -1) // {space} to - + return strings.Replace(c, ",", "__", -1) // , to __ +} + +// GetCurrentDir gets the directory where the application was run +func GetCurrentDir() (string, error) { + d, err := filepath.Abs(filepath.Dir(os.Args[0])) + return d, err +} + +// Exists returns true when a folder/file exists +func Exists(path string) bool { + _, err := os.Stat(path) + return !os.IsNotExist(err) +} diff --git a/vendor/github.com/airking05/termui/.gitignore b/vendor/github.com/airking05/termui/.gitignore new file mode 100644 index 0000000..8b156b0 --- /dev/null +++ b/vendor/github.com/airking05/termui/.gitignore @@ -0,0 +1,26 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] 
+[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +.DS_Store +/vendor diff --git a/vendor/github.com/airking05/termui/.travis.yml b/vendor/github.com/airking05/termui/.travis.yml new file mode 100644 index 0000000..206e887 --- /dev/null +++ b/vendor/github.com/airking05/termui/.travis.yml @@ -0,0 +1,6 @@ +language: go + +go: + - tip + +script: go test -v ./ \ No newline at end of file diff --git a/vendor/github.com/airking05/termui/LICENSE b/vendor/github.com/airking05/termui/LICENSE new file mode 100644 index 0000000..311ccc7 --- /dev/null +++ b/vendor/github.com/airking05/termui/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Zack Guo + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/airking05/termui/README.md b/vendor/github.com/airking05/termui/README.md new file mode 100644 index 0000000..d5f3d9a --- /dev/null +++ b/vendor/github.com/airking05/termui/README.md @@ -0,0 +1,151 @@ +# termui [![Build Status](https://travis-ci.org/gizak/termui.svg?branch=master)](https://travis-ci.org/gizak/termui) [![Doc Status](https://godoc.org/github.com/gizak/termui?status.png)](https://godoc.org/github.com/gizak/termui) + +demo cast under osx 10.10; Terminal.app; Menlo Regular 12pt.) + +`termui` is a cross-platform, easy-to-compile, and fully-customizable terminal dashboard. It is inspired by [blessed-contrib](https://github.com/yaronn/blessed-contrib), but purely in Go. + +Now version v2 has arrived! It brings new event system, new theme system, new `Buffer` interface and specific colour text rendering. (some docs are missing, but it will be completed soon!) + +## Installation + +`master` mirrors v2 branch, to install: + + go get -u github.com/gizak/termui + +It is recommanded to use locked deps by using [glide](https://glide.sh): move to `termui` src directory then run `glide up`. + +For the compatible reason, you can choose to install the legacy version of `termui`: + + go get gopkg.in/gizak/termui.v1 + +## Usage + +### Layout + +To use `termui`, the very first thing you may want to know is how to manage layout. `termui` offers two ways of doing this, known as absolute layout and grid layout. + +__Absolute layout__ + +Each widget has an underlying block structure which basically is a box model. It has border, label and padding properties. A border of a widget can be chosen to hide or display (with its border label), you can pick a different front/back colour for the border as well. 
To display such a widget at a specific location in terminal window, you need to assign `.X`, `.Y`, `.Height`, `.Width` values for each widget before sending it to `.Render`. Let's demonstrate these by a code snippet: + +`````go + import ui "github.com/gizak/termui" // <- ui shortcut, optional + + func main() { + err := ui.Init() + if err != nil { + panic(err) + } + defer ui.Close() + + p := ui.NewPar(":PRESS q TO QUIT DEMO") + p.Height = 3 + p.Width = 50 + p.TextFgColor = ui.ColorWhite + p.BorderLabel = "Text Box" + p.BorderFg = ui.ColorCyan + + g := ui.NewGauge() + g.Percent = 50 + g.Width = 50 + g.Height = 3 + g.Y = 11 + g.BorderLabel = "Gauge" + g.BarColor = ui.ColorRed + g.BorderFg = ui.ColorWhite + g.BorderLabelFg = ui.ColorCyan + + ui.Render(p, g) // feel free to call Render, it's async and non-block + + // event handler... + } +````` + +Note that components can be overlapped (I'd rather call this a feature...), `Render(rs ...Renderer)` renders its args from left to right (i.e. each component's weight is arising from left to right). + +__Grid layout:__ + +grid + +Grid layout uses [12 columns grid system](http://www.w3schools.com/bootstrap/bootstrap_grid_system.asp) with expressive syntax. To use `Grid`, all we need to do is build a widget tree consisting of `Row`s and `Col`s (Actually a `Col` is also a `Row` but with a widget endpoint attached). + +```go + import ui "github.com/gizak/termui" + // init and create widgets... + + // build + ui.Body.AddRows( + ui.NewRow( + ui.NewCol(6, 0, widget0), + ui.NewCol(6, 0, widget1)), + ui.NewRow( + ui.NewCol(3, 0, widget2), + ui.NewCol(3, 0, widget30, widget31, widget32), + ui.NewCol(6, 0, widget4))) + + // calculate layout + ui.Body.Align() + + ui.Render(ui.Body) +``` + +### Events + +`termui` ships with a http-like event mux handling system. All events are channeled up from different sources (typing, click, windows resize, custom event) and then encoded as universal `Event` object. `Event.Path` indicates the event type and `Event.Data` stores the event data struct. Add a handler to a certain event is easy as below: + +```go + // handle key q pressing + ui.Handle("/sys/kbd/q", func(ui.Event) { + // press q to quit + ui.StopLoop() + }) + + ui.Handle("/sys/kbd/C-x", func(ui.Event) { + // handle Ctrl + x combination + }) + + ui.Handle("/sys/kbd", func(ui.Event) { + // handle all other key pressing + }) + + // handle a 1s timer + ui.Handle("/timer/1s", func(e ui.Event) { + t := e.Data.(ui.EvtTimer) + // t is a EvtTimer + if t.Count%2 ==0 { + // do something + } + }) + + ui.Loop() // block until StopLoop is called +``` + +### Widgets + +Click image to see the corresponding demo codes. 
+ +[par](https://github.com/gizak/termui/blob/master/_example/par.go) +[list](https://github.com/gizak/termui/blob/master/_example/list.go) +[gauge](https://github.com/gizak/termui/blob/master/_example/gauge.go) +[linechart](https://github.com/gizak/termui/blob/master/_example/linechart.go) +[barchart](https://github.com/gizak/termui/blob/master/_example/barchart.go) +[barchart](https://github.com/gizak/termui/blob/master/_example/mbarchart.go) +[sparklines](https://github.com/gizak/termui/blob/master/_example/sparklines.go) +[table](https://github.com/gizak/termui/blob/master/_example/table.go) + +## GoDoc + +[godoc](https://godoc.org/github.com/gizak/termui) + +## TODO + +- [x] Grid layout +- [x] Event system +- [x] Canvas widget +- [x] Refine APIs +- [ ] Focusable widgets + +## Changelog + +## License +This library is under the [MIT License](http://opensource.org/licenses/MIT) diff --git a/vendor/github.com/airking05/termui/barchart.go b/vendor/github.com/airking05/termui/barchart.go new file mode 100644 index 0000000..6560c8b --- /dev/null +++ b/vendor/github.com/airking05/termui/barchart.go @@ -0,0 +1,149 @@ +// Copyright 2017 Zack Guo . All rights reserved. +// Use of this source code is governed by a MIT license that can +// be found in the LICENSE file. + +package termui + +import "fmt" + +// BarChart creates multiple bars in a widget: +/* + bc := termui.NewBarChart() + data := []int{3, 2, 5, 3, 9, 5} + bclabels := []string{"S0", "S1", "S2", "S3", "S4", "S5"} + bc.BorderLabel = "Bar Chart" + bc.Data = data + bc.Width = 26 + bc.Height = 10 + bc.DataLabels = bclabels + bc.TextColor = termui.ColorGreen + bc.BarColor = termui.ColorRed + bc.NumColor = termui.ColorYellow +*/ +type BarChart struct { + Block + BarColor Attribute + TextColor Attribute + NumColor Attribute + Data []int + DataLabels []string + BarWidth int + BarGap int + CellChar rune + labels [][]rune + dataNum [][]rune + numBar int + scale float64 + max int +} + +// NewBarChart returns a new *BarChart with current theme. +func NewBarChart() *BarChart { + bc := &BarChart{Block: *NewBlock()} + bc.BarColor = ThemeAttr("barchart.bar.bg") + bc.NumColor = ThemeAttr("barchart.num.fg") + bc.TextColor = ThemeAttr("barchart.text.fg") + bc.BarGap = 1 + bc.BarWidth = 3 + bc.CellChar = ' ' + return bc +} + +func (bc *BarChart) layout() { + bc.numBar = bc.innerArea.Dx() / (bc.BarGap + bc.BarWidth) + bc.labels = make([][]rune, bc.numBar) + bc.dataNum = make([][]rune, len(bc.Data)) + + for i := 0; i < bc.numBar && i < len(bc.DataLabels) && i < len(bc.Data); i++ { + bc.labels[i] = trimStr2Runes(bc.DataLabels[i], bc.BarWidth) + n := bc.Data[i] + s := fmt.Sprint(n) + bc.dataNum[i] = trimStr2Runes(s, bc.BarWidth) + } + + //bc.max = bc.Data[0] // what if Data is nil? Sometimes when bar graph is nill it produces panic with panic: runtime error: index out of range + // Asign a negative value to get maxvalue auto-populates + if bc.max == 0 { + bc.max = -1 + } + for i := 0; i < len(bc.Data); i++ { + if bc.max < bc.Data[i] { + bc.max = bc.Data[i] + } + } + bc.scale = float64(bc.max) / float64(bc.innerArea.Dy()-1) +} + +func (bc *BarChart) SetMax(max int) { + + if max > 0 { + bc.max = max + } +} + +// Buffer implements Bufferer interface. 
+func (bc *BarChart) Buffer() Buffer { + buf := bc.Block.Buffer() + bc.layout() + + for i := 0; i < bc.numBar && i < len(bc.Data) && i < len(bc.DataLabels); i++ { + h := int(float64(bc.Data[i]) / bc.scale) + oftX := i * (bc.BarWidth + bc.BarGap) + + barBg := bc.Bg + barFg := bc.BarColor + + if bc.CellChar == ' ' { + barBg = bc.BarColor + barFg = ColorDefault + if bc.BarColor == ColorDefault { // the same as above + barBg |= AttrReverse + } + } + + // plot bar + for j := 0; j < bc.BarWidth; j++ { + for k := 0; k < h; k++ { + c := Cell{ + Ch: bc.CellChar, + Bg: barBg, + Fg: barFg, + } + + x := bc.innerArea.Min.X + i*(bc.BarWidth+bc.BarGap) + j + y := bc.innerArea.Min.Y + bc.innerArea.Dy() - 2 - k + buf.Set(x, y, c) + } + } + // plot text + for j, k := 0, 0; j < len(bc.labels[i]); j++ { + w := charWidth(bc.labels[i][j]) + c := Cell{ + Ch: bc.labels[i][j], + Bg: bc.Bg, + Fg: bc.TextColor, + } + y := bc.innerArea.Min.Y + bc.innerArea.Dy() - 1 + x := bc.innerArea.Min.X + oftX + k + buf.Set(x, y, c) + k += w + } + // plot num + for j := 0; j < len(bc.dataNum[i]); j++ { + c := Cell{ + Ch: bc.dataNum[i][j], + Fg: bc.NumColor, + Bg: barBg, + } + + if h == 0 { + c.Bg = bc.Bg + } + x := bc.innerArea.Min.X + oftX + (bc.BarWidth-len(bc.dataNum[i]))/2 + j + y := bc.innerArea.Min.Y + bc.innerArea.Dy() - 2 + buf.Set(x, y, c) + } + } + + return buf +} diff --git a/vendor/github.com/airking05/termui/block.go b/vendor/github.com/airking05/termui/block.go new file mode 100644 index 0000000..3e8571b --- /dev/null +++ b/vendor/github.com/airking05/termui/block.go @@ -0,0 +1,240 @@ +// Copyright 2017 Zack Guo . All rights reserved. +// Use of this source code is governed by a MIT license that can +// be found in the LICENSE file. + +package termui + +import "image" + +// Hline is a horizontal line. +type Hline struct { + X int + Y int + Len int + Fg Attribute + Bg Attribute +} + +// Vline is a vertical line. +type Vline struct { + X int + Y int + Len int + Fg Attribute + Bg Attribute +} + +// Buffer draws a horizontal line. +func (l Hline) Buffer() Buffer { + if l.Len <= 0 { + return NewBuffer() + } + return NewFilledBuffer(l.X, l.Y, l.X+l.Len, l.Y+1, HORIZONTAL_LINE, l.Fg, l.Bg) +} + +// Buffer draws a vertical line. +func (l Vline) Buffer() Buffer { + if l.Len <= 0 { + return NewBuffer() + } + return NewFilledBuffer(l.X, l.Y, l.X+1, l.Y+l.Len, VERTICAL_LINE, l.Fg, l.Bg) +} + +// Buffer draws a box border. 
+func (b Block) drawBorder(buf Buffer) { + if !b.Border { + return + } + + min := b.area.Min + max := b.area.Max + + x0 := min.X + y0 := min.Y + x1 := max.X - 1 + y1 := max.Y - 1 + + // draw lines + if b.BorderTop { + buf.Merge(Hline{x0, y0, x1 - x0, b.BorderFg, b.BorderBg}.Buffer()) + } + if b.BorderBottom { + buf.Merge(Hline{x0, y1, x1 - x0, b.BorderFg, b.BorderBg}.Buffer()) + } + if b.BorderLeft { + buf.Merge(Vline{x0, y0, y1 - y0, b.BorderFg, b.BorderBg}.Buffer()) + } + if b.BorderRight { + buf.Merge(Vline{x1, y0, y1 - y0, b.BorderFg, b.BorderBg}.Buffer()) + } + + // draw corners + if b.BorderTop && b.BorderLeft && b.area.Dx() > 0 && b.area.Dy() > 0 { + buf.Set(x0, y0, Cell{TOP_LEFT, b.BorderFg, b.BorderBg}) + } + if b.BorderTop && b.BorderRight && b.area.Dx() > 1 && b.area.Dy() > 0 { + buf.Set(x1, y0, Cell{TOP_RIGHT, b.BorderFg, b.BorderBg}) + } + if b.BorderBottom && b.BorderLeft && b.area.Dx() > 0 && b.area.Dy() > 1 { + buf.Set(x0, y1, Cell{BOTTOM_LEFT, b.BorderFg, b.BorderBg}) + } + if b.BorderBottom && b.BorderRight && b.area.Dx() > 1 && b.area.Dy() > 1 { + buf.Set(x1, y1, Cell{BOTTOM_RIGHT, b.BorderFg, b.BorderBg}) + } +} + +func (b Block) drawBorderLabel(buf Buffer) { + maxTxtW := b.area.Dx() - 2 + tx := DTrimTxCls(DefaultTxBuilder.Build(b.BorderLabel, b.BorderLabelFg, b.BorderLabelBg), maxTxtW) + + for i, w := 0, 0; i < len(tx); i++ { + buf.Set(b.area.Min.X+1+w, b.area.Min.Y, tx[i]) + w += tx[i].Width() + } +} + +// Block is a base struct for all other upper level widgets, +// consider it as css: display:block. +// Normally you do not need to create it manually. +type Block struct { + area image.Rectangle + innerArea image.Rectangle + X int + Y int + Border bool + BorderFg Attribute + BorderBg Attribute + BorderLeft bool + BorderRight bool + BorderTop bool + BorderBottom bool + BorderLabel string + BorderLabelFg Attribute + BorderLabelBg Attribute + Display bool + Bg Attribute + Width int + Height int + PaddingTop int + PaddingBottom int + PaddingLeft int + PaddingRight int + id string + Float Align +} + +// NewBlock returns a *Block which inherits styles from current theme. +func NewBlock() *Block { + b := Block{} + b.Display = true + b.Border = true + b.BorderLeft = true + b.BorderRight = true + b.BorderTop = true + b.BorderBottom = true + b.BorderBg = ThemeAttr("border.bg") + b.BorderFg = ThemeAttr("border.fg") + b.BorderLabelBg = ThemeAttr("label.bg") + b.BorderLabelFg = ThemeAttr("label.fg") + b.Bg = ThemeAttr("block.bg") + b.Width = 2 + b.Height = 2 + b.id = GenId() + b.Float = AlignNone + return &b +} + +func (b Block) Id() string { + return b.id +} + +// Align computes box model +func (b *Block) Align() { + // outer + b.area.Min.X = 0 + b.area.Min.Y = 0 + b.area.Max.X = b.Width + b.area.Max.Y = b.Height + + // float + b.area = AlignArea(TermRect(), b.area, b.Float) + b.area = MoveArea(b.area, b.X, b.Y) + + // inner + b.innerArea.Min.X = b.area.Min.X + b.PaddingLeft + b.innerArea.Min.Y = b.area.Min.Y + b.PaddingTop + b.innerArea.Max.X = b.area.Max.X - b.PaddingRight + b.innerArea.Max.Y = b.area.Max.Y - b.PaddingBottom + + if b.Border { + if b.BorderLeft { + b.innerArea.Min.X++ + } + if b.BorderRight { + b.innerArea.Max.X-- + } + if b.BorderTop { + b.innerArea.Min.Y++ + } + if b.BorderBottom { + b.innerArea.Max.Y-- + } + } +} + +// InnerBounds returns the internal bounds of the block after aligning and +// calculating the padding and border, if any. 
+func (b *Block) InnerBounds() image.Rectangle { + b.Align() + return b.innerArea +} + +// Buffer implements Bufferer interface. +// Draw background and border (if any). +func (b *Block) Buffer() Buffer { + b.Align() + + buf := NewBuffer() + buf.SetArea(b.area) + buf.Fill(' ', ColorDefault, b.Bg) + + b.drawBorder(buf) + b.drawBorderLabel(buf) + + return buf +} + +// GetHeight implements GridBufferer. +// It returns current height of the block. +func (b Block) GetHeight() int { + return b.Height +} + +// SetX implements GridBufferer interface, which sets block's x position. +func (b *Block) SetX(x int) { + b.X = x +} + +// SetY implements GridBufferer interface, it sets y position for block. +func (b *Block) SetY(y int) { + b.Y = y +} + +// SetWidth implements GridBuffer interface, it sets block's width. +func (b *Block) SetWidth(w int) { + b.Width = w +} + +func (b Block) InnerWidth() int { + return b.innerArea.Dx() +} + +func (b Block) InnerHeight() int { + return b.innerArea.Dy() +} + +func (b Block) InnerX() int { + return b.innerArea.Min.X +} + +func (b Block) InnerY() int { return b.innerArea.Min.Y } diff --git a/vendor/github.com/airking05/termui/block_common.go b/vendor/github.com/airking05/termui/block_common.go new file mode 100644 index 0000000..6438bf2 --- /dev/null +++ b/vendor/github.com/airking05/termui/block_common.go @@ -0,0 +1,20 @@ +// Copyright 2017 Zack Guo . All rights reserved. +// Use of this source code is governed by a MIT license that can +// be found in the LICENSE file. + +// +build !windows + +package termui + +const TOP_RIGHT = '┐' +const VERTICAL_LINE = '│' +const HORIZONTAL_LINE = '─' +const TOP_LEFT = '┌' +const BOTTOM_RIGHT = '┘' +const BOTTOM_LEFT = '└' +const VERTICAL_LEFT = '┤' +const VERTICAL_RIGHT = '├' +const HORIZONTAL_DOWN = '┬' +const HORIZONTAL_UP = '┴' +const QUOTA_LEFT = '«' +const QUOTA_RIGHT = '»' diff --git a/vendor/github.com/airking05/termui/block_windows.go b/vendor/github.com/airking05/termui/block_windows.go new file mode 100644 index 0000000..a4fba77 --- /dev/null +++ b/vendor/github.com/airking05/termui/block_windows.go @@ -0,0 +1,14 @@ +// Copyright 2017 Zack Guo . All rights reserved. +// Use of this source code is governed by a MIT license that can +// be found in the LICENSE file. + +// +build windows + +package termui + +const TOP_RIGHT = '+' +const VERTICAL_LINE = '|' +const HORIZONTAL_LINE = '-' +const TOP_LEFT = '+' +const BOTTOM_RIGHT = '+' +const BOTTOM_LEFT = '+' diff --git a/vendor/github.com/airking05/termui/buffer.go b/vendor/github.com/airking05/termui/buffer.go new file mode 100644 index 0000000..9e3a973 --- /dev/null +++ b/vendor/github.com/airking05/termui/buffer.go @@ -0,0 +1,106 @@ +// Copyright 2017 Zack Guo . All rights reserved. +// Use of this source code is governed by a MIT license that can +// be found in the LICENSE file. + +package termui + +import "image" + +// Cell is a rune with assigned Fg and Bg +type Cell struct { + Ch rune + Fg Attribute + Bg Attribute +} + +// Buffer is a renderable rectangle cell data container. +type Buffer struct { + Area image.Rectangle // selected drawing area + CellMap map[image.Point]Cell +} + +// At returns the cell at (x,y). +func (b Buffer) At(x, y int) Cell { + return b.CellMap[image.Pt(x, y)] +} + +// Set assigns a char to (x,y) +func (b Buffer) Set(x, y int, c Cell) { + b.CellMap[image.Pt(x, y)] = c +} + +// Bounds returns the domain for which At can return non-zero color. 
+func (b Buffer) Bounds() image.Rectangle { + x0, y0, x1, y1 := 0, 0, 0, 0 + for p := range b.CellMap { + if p.X > x1 { + x1 = p.X + } + if p.X < x0 { + x0 = p.X + } + if p.Y > y1 { + y1 = p.Y + } + if p.Y < y0 { + y0 = p.Y + } + } + return image.Rect(x0, y0, x1+1, y1+1) +} + +// SetArea assigns a new rect area to Buffer b. +func (b *Buffer) SetArea(r image.Rectangle) { + b.Area.Max = r.Max + b.Area.Min = r.Min +} + +// Sync sets drawing area to the buffer's bound +func (b *Buffer) Sync() { + b.SetArea(b.Bounds()) +} + +// NewCell returns a new cell +func NewCell(ch rune, fg, bg Attribute) Cell { + return Cell{ch, fg, bg} +} + +// Merge merges bs Buffers onto b +func (b *Buffer) Merge(bs ...Buffer) { + for _, buf := range bs { + for p, v := range buf.CellMap { + b.Set(p.X, p.Y, v) + } + b.SetArea(b.Area.Union(buf.Area)) + } +} + +// NewBuffer returns a new Buffer +func NewBuffer() Buffer { + return Buffer{ + CellMap: make(map[image.Point]Cell), + Area: image.Rectangle{}} +} + +// Fill fills the Buffer b with ch,fg and bg. +func (b Buffer) Fill(ch rune, fg, bg Attribute) { + for x := b.Area.Min.X; x < b.Area.Max.X; x++ { + for y := b.Area.Min.Y; y < b.Area.Max.Y; y++ { + b.Set(x, y, Cell{ch, fg, bg}) + } + } +} + +// NewFilledBuffer returns a new Buffer filled with ch, fb and bg. +func NewFilledBuffer(x0, y0, x1, y1 int, ch rune, fg, bg Attribute) Buffer { + buf := NewBuffer() + buf.Area.Min = image.Pt(x0, y0) + buf.Area.Max = image.Pt(x1, y1) + + for x := buf.Area.Min.X; x < buf.Area.Max.X; x++ { + for y := buf.Area.Min.Y; y < buf.Area.Max.Y; y++ { + buf.Set(x, y, Cell{ch, fg, bg}) + } + } + return buf +} diff --git a/vendor/github.com/airking05/termui/canvas.go b/vendor/github.com/airking05/termui/canvas.go new file mode 100644 index 0000000..6d2513e --- /dev/null +++ b/vendor/github.com/airking05/termui/canvas.go @@ -0,0 +1,72 @@ +// Copyright 2017 Zack Guo . All rights reserved. +// Use of this source code is governed by a MIT license that can +// be found in the LICENSE file. 
+ +package termui + +/* +dots: + ,___, + |1 4| + |2 5| + |3 6| + |7 8| + ````` +*/ + +var brailleBase = '\u2800' + +var brailleOftMap = [4][2]rune{ + {'\u0001', '\u0008'}, + {'\u0002', '\u0010'}, + {'\u0004', '\u0020'}, + {'\u0040', '\u0080'}} + +// Canvas contains drawing map: i,j -> rune +type Canvas map[[2]int]rune + +// NewCanvas returns an empty Canvas +func NewCanvas() Canvas { + return make(map[[2]int]rune) +} + +func chOft(x, y int) rune { + return brailleOftMap[y%4][x%2] +} + +func (c Canvas) rawCh(x, y int) rune { + if ch, ok := c[[2]int{x, y}]; ok { + return ch + } + return '\u0000' //brailleOffset +} + +// return coordinate in terminal +func chPos(x, y int) (int, int) { + return y / 4, x / 2 +} + +// Set sets a point (x,y) in the virtual coordinate +func (c Canvas) Set(x, y int) { + i, j := chPos(x, y) + ch := c.rawCh(i, j) + ch |= chOft(x, y) + c[[2]int{i, j}] = ch +} + +// Unset removes point (x,y) +func (c Canvas) Unset(x, y int) { + i, j := chPos(x, y) + ch := c.rawCh(i, j) + ch &= ^chOft(x, y) + c[[2]int{i, j}] = ch +} + +// Buffer returns un-styled points +func (c Canvas) Buffer() Buffer { + buf := NewBuffer() + for k, v := range c { + buf.Set(k[0], k[1], Cell{Ch: v + brailleBase}) + } + return buf +} diff --git a/vendor/github.com/airking05/termui/config.py b/vendor/github.com/airking05/termui/config.py new file mode 100644 index 0000000..30fadcf --- /dev/null +++ b/vendor/github.com/airking05/termui/config.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python3 + +import re +import os +import io + +copyright = """// Copyright 2017 Zack Guo . All rights reserved. +// Use of this source code is governed by a MIT license that can +// be found in the LICENSE file. + +""" + +exclude_dirs = [".git", "_docs"] +exclude_files = [] +include_dirs = [".", "debug", "extra", "test", "_example"] + + +def is_target(fpath): + if os.path.splitext(fpath)[-1] == ".go": + return True + return False + + +def update_copyright(fpath): + print("processing " + fpath) + f = io.open(fpath, 'r', encoding='utf-8') + fstr = f.read() + f.close() + + # remove old + m = re.search('^// Copyright .+?\r?\n\r?\n', fstr, re.MULTILINE|re.DOTALL) + if m: + fstr = fstr[m.end():] + + # add new + fstr = copyright + fstr + f = io.open(fpath, 'w',encoding='utf-8') + f.write(fstr) + f.close() + + +def main(): + for d in include_dirs: + files = [ + os.path.join(d, f) for f in os.listdir(d) + if os.path.isfile(os.path.join(d, f)) + ] + for f in files: + if is_target(f): + update_copyright(f) + + +if __name__ == '__main__': + main() diff --git a/vendor/github.com/airking05/termui/doc.go b/vendor/github.com/airking05/termui/doc.go new file mode 100644 index 0000000..13924eb --- /dev/null +++ b/vendor/github.com/airking05/termui/doc.go @@ -0,0 +1,29 @@ +// Copyright 2017 Zack Guo . All rights reserved. +// Use of this source code is governed by a MIT license that can +// be found in the LICENSE file. + +/* +Package termui is a library designed for creating command line UI. 
For more info, goto http://github.com/gizak/termui + +A simplest example: + package main + + import ui "github.com/gizak/termui" + + func main() { + if err:=ui.Init(); err != nil { + panic(err) + } + defer ui.Close() + + g := ui.NewGauge() + g.Percent = 50 + g.Width = 50 + g.BorderLabel = "Gauge" + + ui.Render(g) + + ui.Loop() + } +*/ +package termui diff --git a/vendor/github.com/airking05/termui/events.go b/vendor/github.com/airking05/termui/events.go new file mode 100644 index 0000000..eb7319b --- /dev/null +++ b/vendor/github.com/airking05/termui/events.go @@ -0,0 +1,323 @@ +// Copyright 2017 Zack Guo . All rights reserved. +// Use of this source code is governed by a MIT license that can +// be found in the LICENSE file. + +package termui + +import ( + "path" + "strconv" + "sync" + "time" + + "github.com/nsf/termbox-go" +) + +type Event struct { + Type string + Path string + From string + To string + Data interface{} + Time int64 +} + +var sysEvtChs []chan Event + +type EvtKbd struct { + KeyStr string +} + +func evtKbd(e termbox.Event) EvtKbd { + ek := EvtKbd{} + + k := string(e.Ch) + pre := "" + mod := "" + + if e.Mod == termbox.ModAlt { + mod = "M-" + } + if e.Ch == 0 { + if e.Key > 0xFFFF-12 { + k = "" + } else if e.Key > 0xFFFF-25 { + ks := []string{"", "", "", "", "", "", "", "", "", ""} + k = ks[0xFFFF-int(e.Key)-12] + } + + if e.Key <= 0x7F { + pre = "C-" + k = string('a' - 1 + int(e.Key)) + kmap := map[termbox.Key][2]string{ + termbox.KeyCtrlSpace: {"C-", ""}, + termbox.KeyBackspace: {"", ""}, + termbox.KeyTab: {"", ""}, + termbox.KeyEnter: {"", ""}, + termbox.KeyEsc: {"", ""}, + termbox.KeyCtrlBackslash: {"C-", "\\"}, + termbox.KeyCtrlSlash: {"C-", "/"}, + termbox.KeySpace: {"", ""}, + termbox.KeyCtrl8: {"C-", "8"}, + } + if sk, ok := kmap[e.Key]; ok { + pre = sk[0] + k = sk[1] + } + } + } + + ek.KeyStr = pre + mod + k + return ek +} + +func crtTermboxEvt(e termbox.Event) Event { + systypemap := map[termbox.EventType]string{ + termbox.EventKey: "keyboard", + termbox.EventResize: "window", + termbox.EventMouse: "mouse", + termbox.EventError: "error", + termbox.EventInterrupt: "interrupt", + } + ne := Event{From: "/sys", Time: time.Now().Unix()} + typ := e.Type + ne.Type = systypemap[typ] + + switch typ { + case termbox.EventKey: + kbd := evtKbd(e) + ne.Path = "/sys/kbd/" + kbd.KeyStr + ne.Data = kbd + case termbox.EventResize: + wnd := EvtWnd{} + wnd.Width = e.Width + wnd.Height = e.Height + ne.Path = "/sys/wnd/resize" + ne.Data = wnd + case termbox.EventError: + err := EvtErr(e.Err) + ne.Path = "/sys/err" + ne.Data = err + case termbox.EventMouse: + m := EvtMouse{} + m.X = e.MouseX + m.Y = e.MouseY + ne.Path = "/sys/mouse" + ne.Data = m + } + return ne +} + +type EvtWnd struct { + Width int + Height int +} + +type EvtMouse struct { + X int + Y int + Press string +} + +type EvtErr error + +func hookTermboxEvt() { + for { + e := termbox.PollEvent() + + for _, c := range sysEvtChs { + go func(ch chan Event) { + ch <- crtTermboxEvt(e) + }(c) + } + } +} + +func NewSysEvtCh() chan Event { + ec := make(chan Event) + sysEvtChs = append(sysEvtChs, ec) + return ec +} + +var DefaultEvtStream = NewEvtStream() + +type EvtStream struct { + sync.RWMutex + srcMap map[string]chan Event + stream chan Event + wg sync.WaitGroup + sigStopLoop chan Event + Handlers map[string]func(Event) + hook func(Event) +} + +func NewEvtStream() *EvtStream { + return &EvtStream{ + srcMap: make(map[string]chan Event), + stream: make(chan Event), + Handlers: make(map[string]func(Event)), + sigStopLoop: make(chan 
Event), + } +} + +func (es *EvtStream) Init() { + es.Merge("internal", es.sigStopLoop) + go func() { + es.wg.Wait() + close(es.stream) + }() +} + +func cleanPath(p string) string { + if p == "" { + return "/" + } + if p[0] != '/' { + p = "/" + p + } + return path.Clean(p) +} + +func isPathMatch(pattern, path string) bool { + if len(pattern) == 0 { + return false + } + n := len(pattern) + return len(path) >= n && path[0:n] == pattern +} + +func (es *EvtStream) Merge(name string, ec chan Event) { + es.Lock() + defer es.Unlock() + + es.wg.Add(1) + es.srcMap[name] = ec + + go func(a chan Event) { + for n := range a { + n.From = name + es.stream <- n + } + es.wg.Done() + }(ec) +} + +func (es *EvtStream) Handle(path string, handler func(Event)) { + es.Handlers[cleanPath(path)] = handler +} + +func findMatch(mux map[string]func(Event), path string) string { + n := -1 + pattern := "" + for m := range mux { + if !isPathMatch(m, path) { + continue + } + if len(m) > n { + pattern = m + n = len(m) + } + } + return pattern + +} +// Remove all existing defined Handlers from the map +func (es *EvtStream) ResetHandlers() { + for Path, _ := range es.Handlers { + delete(es.Handlers, Path) + } + return +} + +func (es *EvtStream) match(path string) string { + return findMatch(es.Handlers, path) +} + +func (es *EvtStream) Hook(f func(Event)) { + es.hook = f +} + +func (es *EvtStream) Loop() { + for e := range es.stream { + switch e.Path { + case "/sig/stoploop": + return + } + go func(a Event) { + es.RLock() + defer es.RUnlock() + if pattern := es.match(a.Path); pattern != "" { + es.Handlers[pattern](a) + } + }(e) + if es.hook != nil { + es.hook(e) + } + } +} + +func (es *EvtStream) StopLoop() { + go func() { + e := Event{ + Path: "/sig/stoploop", + } + es.sigStopLoop <- e + }() +} + +func Merge(name string, ec chan Event) { + DefaultEvtStream.Merge(name, ec) +} + +func Handle(path string, handler func(Event)) { + DefaultEvtStream.Handle(path, handler) +} + +func Loop() { + DefaultEvtStream.Loop() +} + +func StopLoop() { + DefaultEvtStream.StopLoop() +} + +type EvtTimer struct { + Duration time.Duration + Count uint64 +} + +func NewTimerCh(du time.Duration) chan Event { + t := make(chan Event) + + go func(a chan Event) { + n := uint64(0) + for { + n++ + time.Sleep(du) + e := Event{} + e.Type = "timer" + e.Path = "/timer/" + du.String() + e.Time = time.Now().Unix() + e.Data = EvtTimer{ + Duration: du, + Count: n, + } + t <- e + + } + }(t) + return t +} + +var DefualtHandler = func(e Event) { +} + +var usrEvtCh = make(chan Event) + +func SendCustomEvt(path string, data interface{}) { + e := Event{} + e.Path = path + e.Data = data + e.Time = time.Now().Unix() + usrEvtCh <- e +} diff --git a/vendor/github.com/airking05/termui/gauge.go b/vendor/github.com/airking05/termui/gauge.go new file mode 100644 index 0000000..9f6ce3a --- /dev/null +++ b/vendor/github.com/airking05/termui/gauge.go @@ -0,0 +1,109 @@ +// Copyright 2017 Zack Guo . All rights reserved. +// Use of this source code is governed by a MIT license that can +// be found in the LICENSE file. + +package termui + +import ( + "strconv" + "strings" +) + +// Gauge is a progress bar like widget. 
+// A simple example: +/* + g := termui.NewGauge() + g.Percent = 40 + g.Width = 50 + g.Height = 3 + g.BorderLabel = "Slim Gauge" + g.BarColor = termui.ColorRed + g.PercentColor = termui.ColorBlue +*/ + +const ColorUndef Attribute = Attribute(^uint16(0)) + +type Gauge struct { + Block + Percent int + BarColor Attribute + PercentColor Attribute + PercentColorHighlighted Attribute + Label string + LabelAlign Align +} + +// NewGauge return a new gauge with current theme. +func NewGauge() *Gauge { + g := &Gauge{ + Block: *NewBlock(), + PercentColor: ThemeAttr("gauge.percent.fg"), + BarColor: ThemeAttr("gauge.bar.bg"), + Label: "{{percent}}%", + LabelAlign: AlignCenter, + PercentColorHighlighted: ColorUndef, + } + + g.Width = 12 + g.Height = 5 + return g +} + +// Buffer implements Bufferer interface. +func (g *Gauge) Buffer() Buffer { + buf := g.Block.Buffer() + + // plot bar + w := g.Percent * g.innerArea.Dx() / 100 + for i := 0; i < g.innerArea.Dy(); i++ { + for j := 0; j < w; j++ { + c := Cell{} + c.Ch = ' ' + c.Bg = g.BarColor + if c.Bg == ColorDefault { + c.Bg |= AttrReverse + } + buf.Set(g.innerArea.Min.X+j, g.innerArea.Min.Y+i, c) + } + } + + // plot percentage + s := strings.Replace(g.Label, "{{percent}}", strconv.Itoa(g.Percent), -1) + pry := g.innerArea.Min.Y + g.innerArea.Dy()/2 + rs := str2runes(s) + var pos int + switch g.LabelAlign { + case AlignLeft: + pos = 0 + + case AlignCenter: + pos = (g.innerArea.Dx() - strWidth(s)) / 2 + + case AlignRight: + pos = g.innerArea.Dx() - strWidth(s) - 1 + } + pos += g.innerArea.Min.X + + for i, v := range rs { + c := Cell{ + Ch: v, + Fg: g.PercentColor, + } + + if w+g.innerArea.Min.X > pos+i { + c.Bg = g.BarColor + if c.Bg == ColorDefault { + c.Bg |= AttrReverse + } + + if g.PercentColorHighlighted != ColorUndef { + c.Fg = g.PercentColorHighlighted + } + } else { + c.Bg = g.Block.Bg + } + + buf.Set(1+pos+i, pry, c) + } + return buf +} diff --git a/vendor/github.com/airking05/termui/glide.lock b/vendor/github.com/airking05/termui/glide.lock new file mode 100644 index 0000000..be5952d --- /dev/null +++ b/vendor/github.com/airking05/termui/glide.lock @@ -0,0 +1,30 @@ +hash: 7a754ba100256404a978b2fc8738aee337beb822458e4b6060399fb89ebd215c +updated: 2016-11-03T17:39:24.323773674-04:00 +imports: +- name: github.com/maruel/panicparse + version: ad661195ed0e88491e0f14be6613304e3b1141d6 + subpackages: + - stack +- name: github.com/mattn/go-runewidth + version: 737072b4e32b7a5018b4a7125da8d12de90e8045 +- name: github.com/mitchellh/go-wordwrap + version: ad45545899c7b13c020ea92b2072220eefad42b8 +- name: github.com/nsf/termbox-go + version: b6acae516ace002cb8105a89024544a1480655a5 +- name: golang.org/x/net + version: 569280fa63be4e201b975e5411e30a92178f0118 + subpackages: + - websocket +testImports: +- name: github.com/davecgh/go-spew + version: 346938d642f2ec3594ed81d874461961cd0faa76 + subpackages: + - spew +- name: github.com/pmezard/go-difflib + version: d8ed2627bdf02c080bf22230dbb337003b7aba2d + subpackages: + - difflib +- name: github.com/stretchr/testify + version: 976c720a22c8eb4eb6a0b4348ad85ad12491a506 + subpackages: + - assert diff --git a/vendor/github.com/airking05/termui/glide.yaml b/vendor/github.com/airking05/termui/glide.yaml new file mode 100644 index 0000000..a681231 --- /dev/null +++ b/vendor/github.com/airking05/termui/glide.yaml @@ -0,0 +1,9 @@ +package: github.com/gizak/termui +import: +- package: github.com/mattn/go-runewidth +- package: github.com/mitchellh/go-wordwrap +- package: github.com/nsf/termbox-go +- package: 
golang.org/x/net + subpackages: + - websocket +- package: github.com/maruel/panicparse diff --git a/vendor/github.com/airking05/termui/grid.go b/vendor/github.com/airking05/termui/grid.go new file mode 100644 index 0000000..a950232 --- /dev/null +++ b/vendor/github.com/airking05/termui/grid.go @@ -0,0 +1,279 @@ +// Copyright 2017 Zack Guo . All rights reserved. +// Use of this source code is governed by a MIT license that can +// be found in the LICENSE file. + +package termui + +// GridBufferer introduces a Bufferer that can be manipulated by Grid. +type GridBufferer interface { + Bufferer + GetHeight() int + SetWidth(int) + SetX(int) + SetY(int) +} + +// Row builds a layout tree +type Row struct { + Cols []*Row //children + Widget GridBufferer // root + X int + Y int + Width int + Height int + Span int + Offset int +} + +// calculate and set the underlying layout tree's x, y, height and width. +func (r *Row) calcLayout() { + r.assignWidth(r.Width) + r.Height = r.solveHeight() + r.assignX(r.X) + r.assignY(r.Y) +} + +// tell if the node is leaf in the tree. +func (r *Row) isLeaf() bool { + return r.Cols == nil || len(r.Cols) == 0 +} + +func (r *Row) isRenderableLeaf() bool { + return r.isLeaf() && r.Widget != nil +} + +// assign widgets' (and their parent rows') width recursively. +func (r *Row) assignWidth(w int) { + r.SetWidth(w) + + accW := 0 // acc span and offset + calcW := make([]int, len(r.Cols)) // calculated width + calcOftX := make([]int, len(r.Cols)) // computated start position of x + + for i, c := range r.Cols { + accW += c.Span + c.Offset + cw := int(float64(c.Span*r.Width) / 12.0) + + if i >= 1 { + calcOftX[i] = calcOftX[i-1] + + calcW[i-1] + + int(float64(r.Cols[i-1].Offset*r.Width)/12.0) + } + + // use up the space if it is the last col + if i == len(r.Cols)-1 && accW == 12 { + cw = r.Width - calcOftX[i] + } + calcW[i] = cw + r.Cols[i].assignWidth(cw) + } +} + +// bottom up calc and set rows' (and their widgets') height, +// return r's total height. +func (r *Row) solveHeight() int { + if r.isRenderableLeaf() { + r.Height = r.Widget.GetHeight() + return r.Widget.GetHeight() + } + + maxh := 0 + if !r.isLeaf() { + for _, c := range r.Cols { + nh := c.solveHeight() + // when embed rows in Cols, row widgets stack up + if r.Widget != nil { + nh += r.Widget.GetHeight() + } + if nh > maxh { + maxh = nh + } + } + } + + r.Height = maxh + return maxh +} + +// recursively assign x position for r tree. +func (r *Row) assignX(x int) { + r.SetX(x) + + if !r.isLeaf() { + acc := 0 + for i, c := range r.Cols { + if c.Offset != 0 { + acc += int(float64(c.Offset*r.Width) / 12.0) + } + r.Cols[i].assignX(x + acc) + acc += c.Width + } + } +} + +// recursively assign y position to r. +func (r *Row) assignY(y int) { + r.SetY(y) + + if r.isLeaf() { + return + } + + for i := range r.Cols { + acc := 0 + if r.Widget != nil { + acc = r.Widget.GetHeight() + } + r.Cols[i].assignY(y + acc) + } + +} + +// GetHeight implements GridBufferer interface. +func (r Row) GetHeight() int { + return r.Height +} + +// SetX implements GridBufferer interface. +func (r *Row) SetX(x int) { + r.X = x + if r.Widget != nil { + r.Widget.SetX(x) + } +} + +// SetY implements GridBufferer interface. +func (r *Row) SetY(y int) { + r.Y = y + if r.Widget != nil { + r.Widget.SetY(y) + } +} + +// SetWidth implements GridBufferer interface. 
+func (r *Row) SetWidth(w int) { + r.Width = w + if r.Widget != nil { + r.Widget.SetWidth(w) + } +} + +// Buffer implements Bufferer interface, +// recursively merge all widgets buffer +func (r *Row) Buffer() Buffer { + merged := NewBuffer() + + if r.isRenderableLeaf() { + return r.Widget.Buffer() + } + + // for those are not leaves but have a renderable widget + if r.Widget != nil { + merged.Merge(r.Widget.Buffer()) + } + + // collect buffer from children + if !r.isLeaf() { + for _, c := range r.Cols { + merged.Merge(c.Buffer()) + } + } + + return merged +} + +// Grid implements 12 columns system. +// A simple example: +/* + import ui "github.com/gizak/termui" + // init and create widgets... + + // build + ui.Body.AddRows( + ui.NewRow( + ui.NewCol(6, 0, widget0), + ui.NewCol(6, 0, widget1)), + ui.NewRow( + ui.NewCol(3, 0, widget2), + ui.NewCol(3, 0, widget30, widget31, widget32), + ui.NewCol(6, 0, widget4))) + + // calculate layout + ui.Body.Align() + + ui.Render(ui.Body) +*/ +type Grid struct { + Rows []*Row + Width int + X int + Y int + BgColor Attribute +} + +// NewGrid returns *Grid with given rows. +func NewGrid(rows ...*Row) *Grid { + return &Grid{Rows: rows} +} + +// AddRows appends given rows to Grid. +func (g *Grid) AddRows(rs ...*Row) { + g.Rows = append(g.Rows, rs...) +} + +// NewRow creates a new row out of given columns. +func NewRow(cols ...*Row) *Row { + rs := &Row{Span: 12, Cols: cols} + return rs +} + +// NewCol accepts: widgets are LayoutBufferer or widgets is A NewRow. +// Note that if multiple widgets are provided, they will stack up in the col. +func NewCol(span, offset int, widgets ...GridBufferer) *Row { + r := &Row{Span: span, Offset: offset} + + if widgets != nil && len(widgets) == 1 { + wgt := widgets[0] + nw, isRow := wgt.(*Row) + if isRow { + r.Cols = nw.Cols + } else { + r.Widget = wgt + } + return r + } + + r.Cols = []*Row{} + ir := r + for _, w := range widgets { + nr := &Row{Span: 12, Widget: w} + ir.Cols = []*Row{nr} + ir = nr + } + + return r +} + +// Align calculate each rows' layout. +func (g *Grid) Align() { + h := 0 + for _, r := range g.Rows { + r.SetWidth(g.Width) + r.SetX(g.X) + r.SetY(g.Y + h) + r.calcLayout() + h += r.GetHeight() + } +} + +// Buffer implments Bufferer interface. +func (g Grid) Buffer() Buffer { + buf := NewBuffer() + + for _, r := range g.Rows { + buf.Merge(r.Buffer()) + } + return buf +} + +var Body *Grid diff --git a/vendor/github.com/airking05/termui/helper.go b/vendor/github.com/airking05/termui/helper.go new file mode 100644 index 0000000..18a6770 --- /dev/null +++ b/vendor/github.com/airking05/termui/helper.go @@ -0,0 +1,222 @@ +// Copyright 2017 Zack Guo . All rights reserved. +// Use of this source code is governed by a MIT license that can +// be found in the LICENSE file. + +package termui + +import ( + "regexp" + "strings" + + tm "github.com/nsf/termbox-go" +) +import rw "github.com/mattn/go-runewidth" + +/* ---------------Port from termbox-go --------------------- */ + +// Attribute is printable cell's color and style. 
+type Attribute uint16 + +// 8 basic clolrs +const ( + ColorDefault Attribute = iota + ColorBlack + ColorRed + ColorGreen + ColorYellow + ColorBlue + ColorMagenta + ColorCyan + ColorWhite +) + +//Have a constant that defines number of colors +const NumberofColors = 8 + +// Text style +const ( + AttrBold Attribute = 1 << (iota + 9) + AttrUnderline + AttrReverse +) + +var ( + dot = "…" + dotw = rw.StringWidth(dot) +) + +/* ----------------------- End ----------------------------- */ + +func toTmAttr(x Attribute) tm.Attribute { + return tm.Attribute(x) +} + +func str2runes(s string) []rune { + return []rune(s) +} + +// Here for backwards-compatibility. +func trimStr2Runes(s string, w int) []rune { + return TrimStr2Runes(s, w) +} + +// TrimStr2Runes trims string to w[-1 rune], appends …, and returns the runes +// of that string if string is grather then n. If string is small then w, +// return the runes. +func TrimStr2Runes(s string, w int) []rune { + if w <= 0 { + return []rune{} + } + + sw := rw.StringWidth(s) + if sw > w { + return []rune(rw.Truncate(s, w, dot)) + } + return str2runes(s) +} + +// TrimStrIfAppropriate trim string to "s[:-1] + …" +// if string > width otherwise return string +func TrimStrIfAppropriate(s string, w int) string { + if w <= 0 { + return "" + } + + sw := rw.StringWidth(s) + if sw > w { + return rw.Truncate(s, w, dot) + } + + return s +} + +func strWidth(s string) int { + return rw.StringWidth(s) +} + +func charWidth(ch rune) int { + return rw.RuneWidth(ch) +} + +var whiteSpaceRegex = regexp.MustCompile(`\s`) + +// StringToAttribute converts text to a termui attribute. You may specifiy more +// then one attribute like that: "BLACK, BOLD, ...". All whitespaces +// are ignored. +func StringToAttribute(text string) Attribute { + text = whiteSpaceRegex.ReplaceAllString(strings.ToLower(text), "") + attributes := strings.Split(text, ",") + result := Attribute(0) + + for _, theAttribute := range attributes { + var match Attribute + switch theAttribute { + case "reset", "default": + match = ColorDefault + + case "black": + match = ColorBlack + + case "red": + match = ColorRed + + case "green": + match = ColorGreen + + case "yellow": + match = ColorYellow + + case "blue": + match = ColorBlue + + case "magenta": + match = ColorMagenta + + case "cyan": + match = ColorCyan + + case "white": + match = ColorWhite + + case "bold": + match = AttrBold + + case "underline": + match = AttrUnderline + + case "reverse": + match = AttrReverse + } + + result |= match + } + + return result +} + +// TextCells returns a coloured text cells []Cell +func TextCells(s string, fg, bg Attribute) []Cell { + cs := make([]Cell, 0, len(s)) + + // sequence := MarkdownTextRendererFactory{}.TextRenderer(s).Render(fg, bg) + // runes := []rune(sequence.NormalizedText) + runes := str2runes(s) + + for n := range runes { + // point, _ := sequence.PointAt(n, 0, 0) + // cs = append(cs, Cell{point.Ch, point.Fg, point.Bg}) + cs = append(cs, Cell{runes[n], fg, bg}) + } + return cs +} + +// Width returns the actual screen space the cell takes (usually 1 or 2). +func (c Cell) Width() int { + return charWidth(c.Ch) +} + +// Copy return a copy of c +func (c Cell) Copy() Cell { + return c +} + +// TrimTxCells trims the overflowed text cells sequence. +func TrimTxCells(cs []Cell, w int) []Cell { + if len(cs) <= w { + return cs + } + return cs[:w] +} + +// DTrimTxCls trims the overflowed text cells sequence and append dots at the end. 
+func DTrimTxCls(cs []Cell, w int) []Cell { + l := len(cs) + if l <= 0 { + return []Cell{} + } + + rt := make([]Cell, 0, w) + csw := 0 + for i := 0; i < l && csw <= w; i++ { + c := cs[i] + cw := c.Width() + + if cw+csw < w { + rt = append(rt, c) + csw += cw + } else { + rt = append(rt, Cell{'…', c.Fg, c.Bg}) + break + } + } + + return rt +} + +func CellsToStr(cs []Cell) string { + str := "" + for _, c := range cs { + str += string(c.Ch) + } + return str +} diff --git a/vendor/github.com/airking05/termui/linechart.go b/vendor/github.com/airking05/termui/linechart.go new file mode 100644 index 0000000..f7eea28 --- /dev/null +++ b/vendor/github.com/airking05/termui/linechart.go @@ -0,0 +1,331 @@ +// Copyright 2017 Zack Guo . All rights reserved. +// Use of this source code is governed by a MIT license that can +// be found in the LICENSE file. + +package termui + +import ( + "fmt" + "math" +) + +// only 16 possible combinations, why bother +var braillePatterns = map[[2]int]rune{ + [2]int{0, 0}: '⣀', + [2]int{0, 1}: '⡠', + [2]int{0, 2}: '⡐', + [2]int{0, 3}: '⡈', + + [2]int{1, 0}: '⢄', + [2]int{1, 1}: '⠤', + [2]int{1, 2}: '⠔', + [2]int{1, 3}: '⠌', + + [2]int{2, 0}: '⢂', + [2]int{2, 1}: '⠢', + [2]int{2, 2}: '⠒', + [2]int{2, 3}: '⠊', + + [2]int{3, 0}: '⢁', + [2]int{3, 1}: '⠡', + [2]int{3, 2}: '⠑', + [2]int{3, 3}: '⠉', +} + +var lSingleBraille = [4]rune{'\u2840', '⠄', '⠂', '⠁'} +var rSingleBraille = [4]rune{'\u2880', '⠠', '⠐', '⠈'} + +// LineChart has two modes: braille(default) and dot. Using braille gives 2x capicity as dot mode, +// because one braille char can represent two data points. +/* + lc := termui.NewLineChart() + lc.BorderLabel = "braille-mode Line Chart" + lc.Data = [1.2, 1.3, 1.5, 1.7, 1.5, 1.6, 1.8, 2.0] + lc.Width = 50 + lc.Height = 12 + lc.AxesColor = termui.ColorWhite + lc.LineColor = termui.ColorGreen | termui.AttrBold + // termui.Render(lc)... +*/ +type LineChart struct { + Block + Data []float64 + DataLabels []string // if unset, the data indices will be used + Mode string // braille | dot + DotStyle rune + LineColor Attribute + scale float64 // data span per cell on y-axis + AxesColor Attribute + drawingX int + drawingY int + axisYHeight int + axisXWidth int + axisYLabelGap int + axisXLabelGap int + topValue float64 + bottomValue float64 + labelX [][]rune + labelY [][]rune + labelYSpace int + maxY float64 + minY float64 + autoLabels bool +} + +// NewLineChart returns a new LineChart with current theme. +func NewLineChart() *LineChart { + lc := &LineChart{Block: *NewBlock()} + lc.AxesColor = ThemeAttr("linechart.axes.fg") + lc.LineColor = ThemeAttr("linechart.line.fg") + lc.Mode = "braille" + lc.DotStyle = '•' + lc.axisXLabelGap = 2 + lc.axisYLabelGap = 1 + lc.bottomValue = math.Inf(1) + lc.topValue = math.Inf(-1) + return lc +} + +// one cell contains two data points +// so the capicity is 2x as dot-mode +func (lc *LineChart) renderBraille() Buffer { + buf := NewBuffer() + + // return: b -> which cell should the point be in + // m -> in the cell, divided into 4 equal height levels, which subcell? 
+ getPos := func(d float64) (b, m int) { + cnt4 := int((d-lc.bottomValue)/(lc.scale/4) + 0.5) + b = cnt4 / 4 + m = cnt4 % 4 + return + } + // plot points + for i := 0; 2*i+1 < len(lc.Data) && i < lc.axisXWidth; i++ { + b0, m0 := getPos(lc.Data[2*i]) + b1, m1 := getPos(lc.Data[2*i+1]) + + if b0 == b1 { + c := Cell{ + Ch: braillePatterns[[2]int{m0, m1}], + Bg: lc.Bg, + Fg: lc.LineColor, + } + y := lc.innerArea.Min.Y + lc.innerArea.Dy() - 3 - b0 + x := lc.innerArea.Min.X + lc.labelYSpace + 1 + i + buf.Set(x, y, c) + } else { + c0 := Cell{Ch: lSingleBraille[m0], + Fg: lc.LineColor, + Bg: lc.Bg} + x0 := lc.innerArea.Min.X + lc.labelYSpace + 1 + i + y0 := lc.innerArea.Min.Y + lc.innerArea.Dy() - 3 - b0 + buf.Set(x0, y0, c0) + + c1 := Cell{Ch: rSingleBraille[m1], + Fg: lc.LineColor, + Bg: lc.Bg} + x1 := lc.innerArea.Min.X + lc.labelYSpace + 1 + i + y1 := lc.innerArea.Min.Y + lc.innerArea.Dy() - 3 - b1 + buf.Set(x1, y1, c1) + } + + } + return buf +} + +func (lc *LineChart) renderDot() Buffer { + buf := NewBuffer() + for i := 0; i < len(lc.Data) && i < lc.axisXWidth; i++ { + c := Cell{ + Ch: lc.DotStyle, + Fg: lc.LineColor, + Bg: lc.Bg, + } + x := lc.innerArea.Min.X + lc.labelYSpace + 1 + i + y := lc.innerArea.Min.Y + lc.innerArea.Dy() - 3 - int((lc.Data[i]-lc.bottomValue)/lc.scale+0.5) + buf.Set(x, y, c) + } + + return buf +} + +func (lc *LineChart) calcLabelX() { + lc.labelX = [][]rune{} + + for i, l := 0, 0; i < len(lc.DataLabels) && l < lc.axisXWidth; i++ { + if lc.Mode == "dot" { + if l >= len(lc.DataLabels) { + break + } + + s := str2runes(lc.DataLabels[l]) + w := strWidth(lc.DataLabels[l]) + if l+w <= lc.axisXWidth { + lc.labelX = append(lc.labelX, s) + } + l += w + lc.axisXLabelGap + } else { // braille + if 2*l >= len(lc.DataLabels) { + break + } + + s := str2runes(lc.DataLabels[2*l]) + w := strWidth(lc.DataLabels[2*l]) + if l+w <= lc.axisXWidth { + lc.labelX = append(lc.labelX, s) + } + l += w + lc.axisXLabelGap + + } + } +} + +func shortenFloatVal(x float64) string { + s := fmt.Sprintf("%.2f", x) + if len(s)-3 > 3 { + s = fmt.Sprintf("%.2e", x) + } + + if x < 0 { + s = fmt.Sprintf("%.2f", x) + } + return s +} + +func (lc *LineChart) calcLabelY() { + span := lc.topValue - lc.bottomValue + lc.scale = span / float64(lc.axisYHeight) + + n := (1 + lc.axisYHeight) / (lc.axisYLabelGap + 1) + lc.labelY = make([][]rune, n) + maxLen := 0 + for i := 0; i < n; i++ { + s := str2runes(shortenFloatVal(lc.bottomValue + float64(i)*span/float64(n))) + if len(s) > maxLen { + maxLen = len(s) + } + lc.labelY[i] = s + } + + lc.labelYSpace = maxLen +} + +func (lc *LineChart) calcLayout() { + // set datalabels if it is not provided + if (lc.DataLabels == nil || len(lc.DataLabels) == 0) || lc.autoLabels { + lc.autoLabels = true + lc.DataLabels = make([]string, len(lc.Data)) + for i := range lc.Data { + lc.DataLabels[i] = fmt.Sprint(i) + } + } + + // lazy increase, to avoid y shaking frequently + // update bound Y when drawing is gonna overflow + lc.minY = lc.Data[0] + lc.maxY = lc.Data[0] + + // valid visible range + vrange := lc.innerArea.Dx() + if lc.Mode == "braille" { + vrange = 2 * lc.innerArea.Dx() + } + if vrange > len(lc.Data) { + vrange = len(lc.Data) + } + + for _, v := range lc.Data[:vrange] { + if v > lc.maxY { + lc.maxY = v + } + if v < lc.minY { + lc.minY = v + } + } + + span := lc.maxY - lc.minY + + if lc.minY < lc.bottomValue { + lc.bottomValue = lc.minY - 0.2*span + } + + if lc.maxY > lc.topValue { + lc.topValue = lc.maxY + 0.2*span + } + + lc.axisYHeight = lc.innerArea.Dy() - 2 + 
lc.calcLabelY() + + lc.axisXWidth = lc.innerArea.Dx() - 1 - lc.labelYSpace + lc.calcLabelX() + + lc.drawingX = lc.innerArea.Min.X + 1 + lc.labelYSpace + lc.drawingY = lc.innerArea.Min.Y +} + +func (lc *LineChart) plotAxes() Buffer { + buf := NewBuffer() + + origY := lc.innerArea.Min.Y + lc.innerArea.Dy() - 2 + origX := lc.innerArea.Min.X + lc.labelYSpace + + buf.Set(origX, origY, Cell{Ch: ORIGIN, Fg: lc.AxesColor, Bg: lc.Bg}) + + for x := origX + 1; x < origX+lc.axisXWidth; x++ { + buf.Set(x, origY, Cell{Ch: HDASH, Fg: lc.AxesColor, Bg: lc.Bg}) + } + + for dy := 1; dy <= lc.axisYHeight; dy++ { + buf.Set(origX, origY-dy, Cell{Ch: VDASH, Fg: lc.AxesColor, Bg: lc.Bg}) + } + + // x label + oft := 0 + for _, rs := range lc.labelX { + if oft+len(rs) > lc.axisXWidth { + break + } + for j, r := range rs { + c := Cell{ + Ch: r, + Fg: lc.AxesColor, + Bg: lc.Bg, + } + x := origX + oft + j + y := lc.innerArea.Min.Y + lc.innerArea.Dy() - 1 + buf.Set(x, y, c) + } + oft += len(rs) + lc.axisXLabelGap + } + + // y labels + for i, rs := range lc.labelY { + for j, r := range rs { + buf.Set( + lc.innerArea.Min.X+j, + origY-i*(lc.axisYLabelGap+1), + Cell{Ch: r, Fg: lc.AxesColor, Bg: lc.Bg}) + } + } + + return buf +} + +// Buffer implements Bufferer interface. +func (lc *LineChart) Buffer() Buffer { + buf := lc.Block.Buffer() + + if lc.Data == nil || len(lc.Data) == 0 { + return buf + } + lc.calcLayout() + buf.Merge(lc.plotAxes()) + + if lc.Mode == "dot" { + buf.Merge(lc.renderDot()) + } else { + buf.Merge(lc.renderBraille()) + } + + return buf +} diff --git a/vendor/github.com/airking05/termui/linechart_others.go b/vendor/github.com/airking05/termui/linechart_others.go new file mode 100644 index 0000000..14897ea --- /dev/null +++ b/vendor/github.com/airking05/termui/linechart_others.go @@ -0,0 +1,11 @@ +// Copyright 2017 Zack Guo . All rights reserved. +// Use of this source code is governed by a MIT license that can +// be found in the LICENSE file. + +// +build !windows + +package termui + +const VDASH = '┊' +const HDASH = '┈' +const ORIGIN = '└' diff --git a/vendor/github.com/airking05/termui/linechart_windows.go b/vendor/github.com/airking05/termui/linechart_windows.go new file mode 100644 index 0000000..994d3e5 --- /dev/null +++ b/vendor/github.com/airking05/termui/linechart_windows.go @@ -0,0 +1,11 @@ +// Copyright 2017 Zack Guo . All rights reserved. +// Use of this source code is governed by a MIT license that can +// be found in the LICENSE file. + +// +build windows + +package termui + +const VDASH = '|' +const HDASH = '-' +const ORIGIN = '+' diff --git a/vendor/github.com/airking05/termui/list.go b/vendor/github.com/airking05/termui/list.go new file mode 100644 index 0000000..ea6635e --- /dev/null +++ b/vendor/github.com/airking05/termui/list.go @@ -0,0 +1,89 @@ +// Copyright 2017 Zack Guo . All rights reserved. +// Use of this source code is governed by a MIT license that can +// be found in the LICENSE file. + +package termui + +import "strings" + +// List displays []string as its items, +// it has a Overflow option (default is "hidden"), when set to "hidden", +// the item exceeding List's width is truncated, but when set to "wrap", +// the overflowed text breaks into next line. 
+/* + strs := []string{ + "[0] github.com/gizak/termui", + "[1] editbox.go", + "[2] iterrupt.go", + "[3] keyboard.go", + "[4] output.go", + "[5] random_out.go", + "[6] dashboard.go", + "[7] nsf/termbox-go"} + + ls := termui.NewList() + ls.Items = strs + ls.ItemFgColor = termui.ColorYellow + ls.BorderLabel = "List" + ls.Height = 7 + ls.Width = 25 + ls.Y = 0 +*/ +type List struct { + Block + Items []string + Overflow string + ItemFgColor Attribute + ItemBgColor Attribute +} + +// NewList returns a new *List with current theme. +func NewList() *List { + l := &List{Block: *NewBlock()} + l.Overflow = "hidden" + l.ItemFgColor = ThemeAttr("list.item.fg") + l.ItemBgColor = ThemeAttr("list.item.bg") + return l +} + +// Buffer implements Bufferer interface. +func (l *List) Buffer() Buffer { + buf := l.Block.Buffer() + + switch l.Overflow { + case "wrap": + cs := DefaultTxBuilder.Build(strings.Join(l.Items, "\n"), l.ItemFgColor, l.ItemBgColor) + i, j, k := 0, 0, 0 + for i < l.innerArea.Dy() && k < len(cs) { + w := cs[k].Width() + if cs[k].Ch == '\n' || j+w > l.innerArea.Dx() { + i++ + j = 0 + if cs[k].Ch == '\n' { + k++ + } + continue + } + buf.Set(l.innerArea.Min.X+j, l.innerArea.Min.Y+i, cs[k]) + + k++ + j++ + } + + case "hidden": + trimItems := l.Items + if len(trimItems) > l.innerArea.Dy() { + trimItems = trimItems[:l.innerArea.Dy()] + } + for i, v := range trimItems { + cs := DTrimTxCls(DefaultTxBuilder.Build(v, l.ItemFgColor, l.ItemBgColor), l.innerArea.Dx()) + j := 0 + for _, vv := range cs { + w := vv.Width() + buf.Set(l.innerArea.Min.X+j, l.innerArea.Min.Y+i, vv) + j += w + } + } + } + return buf +} diff --git a/vendor/github.com/airking05/termui/mbarchart.go b/vendor/github.com/airking05/termui/mbarchart.go new file mode 100644 index 0000000..0f91e97 --- /dev/null +++ b/vendor/github.com/airking05/termui/mbarchart.go @@ -0,0 +1,242 @@ +// Copyright 2017 Zack Guo . All rights reserved. +// Use of this source code is governed by a MIT license that can +// be found in the LICENSE file. + +package termui + +import ( + "fmt" +) + +// This is the implemetation of multi-colored or stacked bar graph. This is different from default barGraph which is implemented in bar.go +// Multi-Colored-BarChart creates multiple bars in a widget: +/* + bc := termui.NewMBarChart() + data := make([][]int, 2) + data[0] := []int{3, 2, 5, 7, 9, 4} + data[1] := []int{7, 8, 5, 3, 1, 6} + bclabels := []string{"S0", "S1", "S2", "S3", "S4", "S5"} + bc.BorderLabel = "Bar Chart" + bc.Data = data + bc.Width = 26 + bc.Height = 10 + bc.DataLabels = bclabels + bc.TextColor = termui.ColorGreen + bc.BarColor = termui.ColorRed + bc.NumColor = termui.ColorYellow +*/ +type MBarChart struct { + Block + BarColor [NumberofColors]Attribute + TextColor Attribute + NumColor [NumberofColors]Attribute + Data [NumberofColors][]int + DataLabels []string + BarWidth int + BarGap int + labels [][]rune + dataNum [NumberofColors][][]rune + numBar int + scale float64 + max int + minDataLen int + numStack int + ShowScale bool + maxScale []rune +} + +// NewBarChart returns a new *BarChart with current theme. 
+func NewMBarChart() *MBarChart { + bc := &MBarChart{Block: *NewBlock()} + bc.BarColor[0] = ThemeAttr("mbarchart.bar.bg") + bc.NumColor[0] = ThemeAttr("mbarchart.num.fg") + bc.TextColor = ThemeAttr("mbarchart.text.fg") + bc.BarGap = 1 + bc.BarWidth = 3 + return bc +} + +func (bc *MBarChart) layout() { + bc.numBar = bc.innerArea.Dx() / (bc.BarGap + bc.BarWidth) + bc.labels = make([][]rune, bc.numBar) + DataLen := 0 + LabelLen := len(bc.DataLabels) + bc.minDataLen = 9999 //Set this to some very hight value so that we find the minimum one We want to know which array among data[][] has got the least length + + // We need to know how many stack/data array data[0] , data[1] are there + for i := 0; i < len(bc.Data); i++ { + if bc.Data[i] == nil { + break + } + DataLen++ + } + bc.numStack = DataLen + + //We need to know what is the mimimum size of data array data[0] could have 10 elements data[1] could have only 5, so we plot only 5 bar graphs + + for i := 0; i < DataLen; i++ { + if bc.minDataLen > len(bc.Data[i]) { + bc.minDataLen = len(bc.Data[i]) + } + } + + if LabelLen > bc.minDataLen { + LabelLen = bc.minDataLen + } + + for i := 0; i < LabelLen && i < bc.numBar; i++ { + bc.labels[i] = trimStr2Runes(bc.DataLabels[i], bc.BarWidth) + } + + for i := 0; i < bc.numStack; i++ { + bc.dataNum[i] = make([][]rune, len(bc.Data[i])) + //For each stack of bar calcualte the rune + for j := 0; j < LabelLen && i < bc.numBar; j++ { + n := bc.Data[i][j] + s := fmt.Sprint(n) + bc.dataNum[i][j] = trimStr2Runes(s, bc.BarWidth) + } + //If color is not defined by default then populate a color that is different from the prevous bar + if bc.BarColor[i] == ColorDefault && bc.NumColor[i] == ColorDefault { + if i == 0 { + bc.BarColor[i] = ColorBlack + } else { + bc.BarColor[i] = bc.BarColor[i-1] + 1 + if bc.BarColor[i] > NumberofColors { + bc.BarColor[i] = ColorBlack + } + } + bc.NumColor[i] = (NumberofColors + 1) - bc.BarColor[i] //Make NumColor opposite of barColor for visibility + } + } + + //If Max value is not set then we have to populate, this time the max value will be max(sum(d1[0],d2[0],d3[0]) .... sum(d1[n], d2[n], d3[n])) + + if bc.max == 0 { + bc.max = -1 + } + for i := 0; i < bc.minDataLen && i < LabelLen; i++ { + var dsum int + for j := 0; j < bc.numStack; j++ { + dsum += bc.Data[j][i] + } + if dsum > bc.max { + bc.max = dsum + } + } + + //Finally Calculate max sale + if bc.ShowScale { + s := fmt.Sprintf("%d", bc.max) + bc.maxScale = trimStr2Runes(s, len(s)) + bc.scale = float64(bc.max) / float64(bc.innerArea.Dy()-2) + } else { + bc.scale = float64(bc.max) / float64(bc.innerArea.Dy()-1) + } + +} + +func (bc *MBarChart) SetMax(max int) { + + if max > 0 { + bc.max = max + } +} + +// Buffer implements Bufferer interface. +func (bc *MBarChart) Buffer() Buffer { + buf := bc.Block.Buffer() + bc.layout() + var oftX int + + for i := 0; i < bc.numBar && i < bc.minDataLen && i < len(bc.DataLabels); i++ { + ph := 0 //Previous Height to stack up + oftX = i * (bc.BarWidth + bc.BarGap) + for i1 := 0; i1 < bc.numStack; i1++ { + h := int(float64(bc.Data[i1][i]) / bc.scale) + // plot bars + for j := 0; j < bc.BarWidth; j++ { + for k := 0; k < h; k++ { + c := Cell{ + Ch: ' ', + Bg: bc.BarColor[i1], + } + if bc.BarColor[i1] == ColorDefault { // when color is default, space char treated as transparent! 
+ c.Bg |= AttrReverse + } + x := bc.innerArea.Min.X + i*(bc.BarWidth+bc.BarGap) + j + y := bc.innerArea.Min.Y + bc.innerArea.Dy() - 2 - k - ph + buf.Set(x, y, c) + + } + } + ph += h + } + // plot text + for j, k := 0, 0; j < len(bc.labels[i]); j++ { + w := charWidth(bc.labels[i][j]) + c := Cell{ + Ch: bc.labels[i][j], + Bg: bc.Bg, + Fg: bc.TextColor, + } + y := bc.innerArea.Min.Y + bc.innerArea.Dy() - 1 + x := bc.innerArea.Max.X + oftX + ((bc.BarWidth - len(bc.labels[i])) / 2) + k + buf.Set(x, y, c) + k += w + } + // plot num + ph = 0 //re-initialize previous height + for i1 := 0; i1 < bc.numStack; i1++ { + h := int(float64(bc.Data[i1][i]) / bc.scale) + for j := 0; j < len(bc.dataNum[i1][i]) && h > 0; j++ { + c := Cell{ + Ch: bc.dataNum[i1][i][j], + Fg: bc.NumColor[i1], + Bg: bc.BarColor[i1], + } + if bc.BarColor[i1] == ColorDefault { // the same as above + c.Bg |= AttrReverse + } + if h == 0 { + c.Bg = bc.Bg + } + x := bc.innerArea.Min.X + oftX + (bc.BarWidth-len(bc.dataNum[i1][i]))/2 + j + y := bc.innerArea.Min.Y + bc.innerArea.Dy() - 2 - ph + buf.Set(x, y, c) + } + ph += h + } + } + + if bc.ShowScale { + //Currently bar graph only supprts data range from 0 to MAX + //Plot 0 + c := Cell{ + Ch: '0', + Bg: bc.Bg, + Fg: bc.TextColor, + } + + y := bc.innerArea.Min.Y + bc.innerArea.Dy() - 2 + x := bc.X + buf.Set(x, y, c) + + //Plot the maximum sacle value + for i := 0; i < len(bc.maxScale); i++ { + c := Cell{ + Ch: bc.maxScale[i], + Bg: bc.Bg, + Fg: bc.TextColor, + } + + y := bc.innerArea.Min.Y + x := bc.X + i + + buf.Set(x, y, c) + } + + } + + return buf +} diff --git a/vendor/github.com/airking05/termui/mkdocs.yml b/vendor/github.com/airking05/termui/mkdocs.yml new file mode 100644 index 0000000..2ab45f0 --- /dev/null +++ b/vendor/github.com/airking05/termui/mkdocs.yml @@ -0,0 +1,28 @@ +pages: +- Home: 'index.md' +- Quickstart: 'quickstart.md' +- Recipes: 'recipes.md' +- References: + - Layouts: 'layouts.md' + - Components: 'components.md' + - Events: 'events.md' + - Themes: 'themes.md' +- Versions: 'versions.md' +- About: 'about.md' + +site_name: termui +repo_url: https://github.com/gizak/termui/ +site_description: 'termui user guide' +site_author: gizak + +docs_dir: '_docs' + +theme: readthedocs + +markdown_extensions: + - smarty + - admonition + - toc + +extra: + version: 1.0 diff --git a/vendor/github.com/airking05/termui/par.go b/vendor/github.com/airking05/termui/par.go new file mode 100644 index 0000000..29b6d46 --- /dev/null +++ b/vendor/github.com/airking05/termui/par.go @@ -0,0 +1,73 @@ +// Copyright 2017 Zack Guo . All rights reserved. +// Use of this source code is governed by a MIT license that can +// be found in the LICENSE file. + +package termui + +// Par displays a paragraph. +/* + par := termui.NewPar("Simple Text") + par.Height = 3 + par.Width = 17 + par.BorderLabel = "Label" +*/ +type Par struct { + Block + Text string + TextFgColor Attribute + TextBgColor Attribute + WrapLength int // words wrap limit. Note it may not work properly with multi-width char +} + +// NewPar returns a new *Par with given text as its content. +func NewPar(s string) *Par { + return &Par{ + Block: *NewBlock(), + Text: s, + TextFgColor: ThemeAttr("par.text.fg"), + TextBgColor: ThemeAttr("par.text.bg"), + WrapLength: 0, + } +} + +// Buffer implements Bufferer interface. 
+func (p *Par) Buffer() Buffer { + buf := p.Block.Buffer() + + fg, bg := p.TextFgColor, p.TextBgColor + cs := DefaultTxBuilder.Build(p.Text, fg, bg) + + // wrap if WrapLength set + if p.WrapLength < 0 { + cs = wrapTx(cs, p.Width-2) + } else if p.WrapLength > 0 { + cs = wrapTx(cs, p.WrapLength) + } + + y, x, n := 0, 0, 0 + for y < p.innerArea.Dy() && n < len(cs) { + w := cs[n].Width() + if cs[n].Ch == '\n' || x+w > p.innerArea.Dx() { + y++ + x = 0 // set x = 0 + if cs[n].Ch == '\n' { + n++ + } + + if y >= p.innerArea.Dy() { + buf.Set(p.innerArea.Min.X+p.innerArea.Dx()-1, + p.innerArea.Min.Y+p.innerArea.Dy()-1, + Cell{Ch: '…', Fg: p.TextFgColor, Bg: p.TextBgColor}) + break + } + continue + } + + buf.Set(p.innerArea.Min.X+x, p.innerArea.Min.Y+y, cs[n]) + + n++ + x += w + } + + return buf +} diff --git a/vendor/github.com/airking05/termui/pos.go b/vendor/github.com/airking05/termui/pos.go new file mode 100644 index 0000000..c7d647f --- /dev/null +++ b/vendor/github.com/airking05/termui/pos.go @@ -0,0 +1,78 @@ +// Copyright 2017 Zack Guo . All rights reserved. +// Use of this source code is governed by a MIT license that can +// be found in the LICENSE file. + +package termui + +import "image" + +// Align is the position of the gauge's label. +type Align uint + +// All supported positions. +const ( + AlignNone Align = 0 + AlignLeft Align = 1 << iota + AlignRight + AlignBottom + AlignTop + AlignCenterVertical + AlignCenterHorizontal + AlignCenter = AlignCenterVertical | AlignCenterHorizontal +) + +func AlignArea(parent, child image.Rectangle, a Align) image.Rectangle { + w, h := child.Dx(), child.Dy() + + // parent center + pcx, pcy := parent.Min.X+parent.Dx()/2, parent.Min.Y+parent.Dy()/2 + // child center + ccx, ccy := child.Min.X+child.Dx()/2, child.Min.Y+child.Dy()/2 + + if a&AlignLeft == AlignLeft { + child.Min.X = parent.Min.X + child.Max.X = child.Min.X + w + } + + if a&AlignRight == AlignRight { + child.Max.X = parent.Max.X + child.Min.X = child.Max.X - w + } + + if a&AlignBottom == AlignBottom { + child.Max.Y = parent.Max.Y + child.Min.Y = child.Max.Y - h + } + + if a&AlignTop == AlignRight { + child.Min.Y = parent.Min.Y + child.Max.Y = child.Min.Y + h + } + + if a&AlignCenterHorizontal == AlignCenterHorizontal { + child.Min.X += pcx - ccx + child.Max.X = child.Min.X + w + } + + if a&AlignCenterVertical == AlignCenterVertical { + child.Min.Y += pcy - ccy + child.Max.Y = child.Min.Y + h + } + + return child +} + +func MoveArea(a image.Rectangle, dx, dy int) image.Rectangle { + a.Min.X += dx + a.Max.X += dx + a.Min.Y += dy + a.Max.Y += dy + return a +} + +var termWidth int +var termHeight int + +func TermRect() image.Rectangle { + return image.Rect(0, 0, termWidth, termHeight) +} diff --git a/vendor/github.com/airking05/termui/render.go b/vendor/github.com/airking05/termui/render.go new file mode 100644 index 0000000..5b58409 --- /dev/null +++ b/vendor/github.com/airking05/termui/render.go @@ -0,0 +1,164 @@ +// Copyright 2017 Zack Guo . All rights reserved. +// Use of this source code is governed by a MIT license that can +// be found in the LICENSE file. + +package termui + +import ( + "image" + "io" + "sync" + "time" + + "fmt" + + "os" + + "runtime/debug" + + "bytes" + + "github.com/maruel/panicparse/stack" + tm "github.com/nsf/termbox-go" +) + +// Bufferer should be implemented by all renderable components. +type Bufferer interface { + Buffer() Buffer +} + +// Init initializes termui library. This function should be called before any others. 
+// After initialization, the library must be finalized by 'Close' function. +func Init() error { + if err := tm.Init(); err != nil { + return err + } + + sysEvtChs = make([]chan Event, 0) + go hookTermboxEvt() + + renderJobs = make(chan []Bufferer) + //renderLock = new(sync.RWMutex) + + Body = NewGrid() + Body.X = 0 + Body.Y = 0 + Body.BgColor = ThemeAttr("bg") + Body.Width = TermWidth() + + DefaultEvtStream.Init() + DefaultEvtStream.Merge("termbox", NewSysEvtCh()) + DefaultEvtStream.Merge("timer", NewTimerCh(time.Second)) + DefaultEvtStream.Merge("custom", usrEvtCh) + + DefaultEvtStream.Handle("/", DefualtHandler) + DefaultEvtStream.Handle("/sys/wnd/resize", func(e Event) { + w := e.Data.(EvtWnd) + Body.Width = w.Width + }) + + DefaultWgtMgr = NewWgtMgr() + DefaultEvtStream.Hook(DefaultWgtMgr.WgtHandlersHook()) + + go func() { + for bs := range renderJobs { + render(bs...) + } + }() + + return nil +} + +// Close finalizes termui library, +// should be called after successful initialization when termui's functionality isn't required anymore. +func Close() { + tm.Close() +} + +var renderLock sync.Mutex + +func termSync() { + renderLock.Lock() + tm.Sync() + termWidth, termHeight = tm.Size() + renderLock.Unlock() +} + +// TermWidth returns the current terminal's width. +func TermWidth() int { + termSync() + return termWidth +} + +// TermHeight returns the current terminal's height. +func TermHeight() int { + termSync() + return termHeight +} + +// Render renders all Bufferer in the given order from left to right, +// right could overlap on left ones. +func render(bs ...Bufferer) { + defer func() { + if e := recover(); e != nil { + Close() + fmt.Fprintf(os.Stderr, "Captured a panic(value=%v) when rendering Bufferer. Exit termui and clean terminal...\nPrint stack trace:\n\n", e) + //debug.PrintStack() + gs, err := stack.ParseDump(bytes.NewReader(debug.Stack()), os.Stderr) + if err != nil { + debug.PrintStack() + os.Exit(1) + } + p := &stack.Palette{} + buckets := stack.SortBuckets(stack.Bucketize(gs, stack.AnyValue)) + srcLen, pkgLen := stack.CalcLengths(buckets, false) + for _, bucket := range buckets { + io.WriteString(os.Stdout, p.BucketHeader(&bucket, false, len(buckets) > 1)) + io.WriteString(os.Stdout, p.StackLines(&bucket.Signature, srcLen, pkgLen, false)) + } + os.Exit(1) + } + }() + for _, b := range bs { + + buf := b.Buffer() + // set cels in buf + for p, c := range buf.CellMap { + if p.In(buf.Area) { + + tm.SetCell(p.X, p.Y, c.Ch, toTmAttr(c.Fg), toTmAttr(c.Bg)) + + } + } + + } + + renderLock.Lock() + // render + tm.Flush() + renderLock.Unlock() +} + +func Clear() { + tm.Clear(tm.ColorDefault, toTmAttr(ThemeAttr("bg"))) +} + +func clearArea(r image.Rectangle, bg Attribute) { + for i := r.Min.X; i < r.Max.X; i++ { + for j := r.Min.Y; j < r.Max.Y; j++ { + tm.SetCell(i, j, ' ', tm.ColorDefault, toTmAttr(bg)) + } + } +} + +func ClearArea(r image.Rectangle, bg Attribute) { + clearArea(r, bg) + tm.Flush() +} + +var renderJobs chan []Bufferer + +func Render(bs ...Bufferer) { + //go func() { renderJobs <- bs }() + renderJobs <- bs +} diff --git a/vendor/github.com/airking05/termui/sparkline.go b/vendor/github.com/airking05/termui/sparkline.go new file mode 100644 index 0000000..d906e49 --- /dev/null +++ b/vendor/github.com/airking05/termui/sparkline.go @@ -0,0 +1,167 @@ +// Copyright 2017 Zack Guo . All rights reserved. +// Use of this source code is governed by a MIT license that can +// be found in the LICENSE file. + +package termui + +// Sparkline is like: ▅▆▂▂▅▇▂▂▃▆▆▆▅▃. 
The data points should be non-negative integers. +/* + data := []int{4, 2, 1, 6, 3, 9, 1, 4, 2, 15, 14, 9, 8, 6, 10, 13, 15, 12, 10, 5, 3, 6, 1} + spl := termui.NewSparkline() + spl.Data = data + spl.Title = "Sparkline 0" + spl.LineColor = termui.ColorGreen +*/ +type Sparkline struct { + Data []int + Height int + Title string + TitleColor Attribute + LineColor Attribute + displayHeight int + scale float32 + max int +} + +// Sparklines is a renderable widget which groups together the given sparklines. +/* + spls := termui.NewSparklines(spl0,spl1,spl2) //... + spls.Height = 2 + spls.Width = 20 +*/ +type Sparklines struct { + Block + Lines []Sparkline + displayLines int + displayWidth int +} + +var sparks = []rune{'▁', '▂', '▃', '▄', '▅', '▆', '▇', '█'} + +// Add appends a given Sparkline to s *Sparklines. +func (s *Sparklines) Add(sl Sparkline) { + s.Lines = append(s.Lines, sl) +} + +// NewSparkline returns a unrenderable single sparkline that intended to be added into Sparklines. +func NewSparkline() Sparkline { + return Sparkline{ + Height: 1, + TitleColor: ThemeAttr("sparkline.title.fg"), + LineColor: ThemeAttr("sparkline.line.fg")} +} + +// NewSparklines return a new *Spaklines with given Sparkline(s), you can always add a new Sparkline later. +func NewSparklines(ss ...Sparkline) *Sparklines { + s := &Sparklines{Block: *NewBlock(), Lines: ss} + return s +} + +func (sl *Sparklines) update() { + for i, v := range sl.Lines { + if v.Title == "" { + sl.Lines[i].displayHeight = v.Height + } else { + sl.Lines[i].displayHeight = v.Height + 1 + } + } + sl.displayWidth = sl.innerArea.Dx() + + // get how many lines gotta display + h := 0 + sl.displayLines = 0 + for _, v := range sl.Lines { + if h+v.displayHeight <= sl.innerArea.Dy() { + sl.displayLines++ + } else { + break + } + h += v.displayHeight + } + + for i := 0; i < sl.displayLines; i++ { + data := sl.Lines[i].Data + + max := 0 + for _, v := range data { + if max < v { + max = v + } + } + sl.Lines[i].max = max + if max != 0 { + sl.Lines[i].scale = float32(8*sl.Lines[i].Height) / float32(max) + } else { // when all negative + sl.Lines[i].scale = 0 + } + } +} + +// Buffer implements Bufferer interface. 
+func (sl *Sparklines) Buffer() Buffer { + buf := sl.Block.Buffer() + sl.update() + + oftY := 0 + for i := 0; i < sl.displayLines; i++ { + l := sl.Lines[i] + data := l.Data + + if len(data) > sl.innerArea.Dx() { + data = data[len(data)-sl.innerArea.Dx():] + } + + if l.Title != "" { + rs := trimStr2Runes(l.Title, sl.innerArea.Dx()) + oftX := 0 + for _, v := range rs { + w := charWidth(v) + c := Cell{ + Ch: v, + Fg: l.TitleColor, + Bg: sl.Bg, + } + x := sl.innerArea.Min.X + oftX + y := sl.innerArea.Min.Y + oftY + buf.Set(x, y, c) + oftX += w + } + } + + for j, v := range data { + // display height of the data point, zero when data is negative + h := int(float32(v)*l.scale + 0.5) + if v < 0 { + h = 0 + } + + barCnt := h / 8 + barMod := h % 8 + for jj := 0; jj < barCnt; jj++ { + c := Cell{ + Ch: ' ', // => sparks[7] + Bg: l.LineColor, + } + x := sl.innerArea.Min.X + j + y := sl.innerArea.Min.Y + oftY + l.Height - jj + + //p.Bg = sl.BgColor + buf.Set(x, y, c) + } + if barMod != 0 { + c := Cell{ + Ch: sparks[barMod-1], + Fg: l.LineColor, + Bg: sl.Bg, + } + x := sl.innerArea.Min.X + j + y := sl.innerArea.Min.Y + oftY + l.Height - barCnt + buf.Set(x, y, c) + } + } + + oftY += l.displayHeight + } + + return buf +} diff --git a/vendor/github.com/airking05/termui/table.go b/vendor/github.com/airking05/termui/table.go new file mode 100644 index 0000000..e3d1bbf --- /dev/null +++ b/vendor/github.com/airking05/termui/table.go @@ -0,0 +1,185 @@ +// Copyright 2017 Zack Guo . All rights reserved. +// Use of this source code is governed by a MIT license that can +// be found in the LICENSE file. + +package termui + +import "strings" + +/* Table is like: + +┌Awesome Table ────────────────────────────────────────────────┐ +│ Col0 | Col1 | Col2 | Col3 | Col4 | Col5 | Col6 | +│──────────────────────────────────────────────────────────────│ +│ Some Item #1 | AAA | 123 | CCCCC | EEEEE | GGGGG | IIIII | +│──────────────────────────────────────────────────────────────│ +│ Some Item #2 | BBB | 456 | DDDDD | FFFFF | HHHHH | JJJJJ | +└──────────────────────────────────────────────────────────────┘ + +Datapoints are a two dimensional array of strings: [][]string + +Example: + data := [][]string{ + {"Col0", "Col1", "Col3", "Col4", "Col5", "Col6"}, + {"Some Item #1", "AAA", "123", "CCCCC", "EEEEE", "GGGGG", "IIIII"}, + {"Some Item #2", "BBB", "456", "DDDDD", "FFFFF", "HHHHH", "JJJJJ"}, + } + + table := termui.NewTable() + table.Rows = data // type [][]string + table.FgColor = termui.ColorWhite + table.BgColor = termui.ColorDefault + table.Height = 7 + table.Width = 62 + table.Y = 0 + table.X = 0 + table.Border = true +*/ + +// Table tracks all the attributes of a Table instance +type Table struct { + Block + Rows [][]string + CellWidth []int + FgColor Attribute + BgColor Attribute + FgColors []Attribute + BgColors []Attribute + Separator bool + TextAlign Align +} + +// NewTable returns a new Table instance +func NewTable() *Table { + table := &Table{Block: *NewBlock()} + table.FgColor = ColorWhite + table.BgColor = ColorDefault + table.Separator = true + return table +} + +// CellsWidth calculates the width of a cell array and returns an int +func cellsWidth(cells []Cell) int { + width := 0 + for _, c := range cells { + width += c.Width() + } + return width +} + +// Analysis generates and returns an array of []Cell that represent all columns in the Table +func (table *Table) Analysis() [][]Cell { + var rowCells [][]Cell + length := len(table.Rows) + if length < 1 { + return rowCells + } + + if len(table.FgColors) == 0 { 
+ table.FgColors = make([]Attribute, len(table.Rows)) + } + if len(table.BgColors) == 0 { + table.BgColors = make([]Attribute, len(table.Rows)) + } + + cellWidths := make([]int, len(table.Rows[0])) + + for y, row := range table.Rows { + if table.FgColors[y] == 0 { + table.FgColors[y] = table.FgColor + } + if table.BgColors[y] == 0 { + table.BgColors[y] = table.BgColor + } + for x, str := range row { + cells := DefaultTxBuilder.Build(str, table.FgColors[y], table.BgColors[y]) + cw := cellsWidth(cells) + if cellWidths[x] < cw { + cellWidths[x] = cw + } + rowCells = append(rowCells, cells) + } + } + table.CellWidth = cellWidths + return rowCells +} + +// SetSize calculates the table size and sets the internal value +func (table *Table) SetSize() { + length := len(table.Rows) + if table.Separator { + table.Height = length*2 + 1 + } else { + table.Height = length + 2 + } + table.Width = 2 + if length != 0 { + for _, cellWidth := range table.CellWidth { + table.Width += cellWidth + 3 + } + } +} + +// CalculatePosition ... +func (table *Table) CalculatePosition(x int, y int, coordinateX *int, coordinateY *int, cellStart *int) { + if table.Separator { + *coordinateY = table.innerArea.Min.Y + y*2 + } else { + *coordinateY = table.innerArea.Min.Y + y + } + if x == 0 { + *cellStart = table.innerArea.Min.X + } else { + *cellStart += table.CellWidth[x-1] + 3 + } + + switch table.TextAlign { + case AlignRight: + *coordinateX = *cellStart + (table.CellWidth[x] - len(table.Rows[y][x])) + 2 + case AlignCenter: + *coordinateX = *cellStart + (table.CellWidth[x]-len(table.Rows[y][x]))/2 + 2 + default: + *coordinateX = *cellStart + 2 + } +} + +// Buffer ... +func (table *Table) Buffer() Buffer { + buffer := table.Block.Buffer() + rowCells := table.Analysis() + pointerX := table.innerArea.Min.X + 2 + pointerY := table.innerArea.Min.Y + borderPointerX := table.innerArea.Min.X + for y, row := range table.Rows { + for x := range row { + table.CalculatePosition(x, y, &pointerX, &pointerY, &borderPointerX) + background := DefaultTxBuilder.Build(strings.Repeat(" ", table.CellWidth[x]+3), table.BgColors[y], table.BgColors[y]) + cells := rowCells[y*len(row)+x] + for i, back := range background { + buffer.Set(borderPointerX+i, pointerY, back) + } + + coordinateX := pointerX + for _, printer := range cells { + buffer.Set(coordinateX, pointerY, printer) + coordinateX += printer.Width() + } + + if x != 0 { + dividors := DefaultTxBuilder.Build("|", table.FgColors[y], table.BgColors[y]) + for _, dividor := range dividors { + buffer.Set(borderPointerX, pointerY, dividor) + } + } + } + + if table.Separator { + border := DefaultTxBuilder.Build(strings.Repeat("─", table.Width-2), table.FgColor, table.BgColor) + for i, cell := range border { + buffer.Set(i+1, pointerY+1, cell) + } + } + } + + return buffer +} diff --git a/vendor/github.com/airking05/termui/textbuilder.go b/vendor/github.com/airking05/termui/textbuilder.go new file mode 100644 index 0000000..12e2055 --- /dev/null +++ b/vendor/github.com/airking05/termui/textbuilder.go @@ -0,0 +1,278 @@ +// Copyright 2017 Zack Guo . All rights reserved. +// Use of this source code is governed by a MIT license that can +// be found in the LICENSE file. + +package termui + +import ( + "regexp" + "strings" + + "github.com/mitchellh/go-wordwrap" +) + +// TextBuilder is a minimal interface to produce text []Cell using specific syntax (markdown). +type TextBuilder interface { + Build(s string, fg, bg Attribute) []Cell +} + +// DefaultTxBuilder is set to be MarkdownTxBuilder. 
+var DefaultTxBuilder = NewMarkdownTxBuilder() + +// MarkdownTxBuilder implements TextBuilder interface, using markdown syntax. +type MarkdownTxBuilder struct { + baseFg Attribute + baseBg Attribute + plainTx []rune + markers []marker +} + +type marker struct { + st int + ed int + fg Attribute + bg Attribute +} + +var colorMap = map[string]Attribute{ + "red": ColorRed, + "blue": ColorBlue, + "black": ColorBlack, + "cyan": ColorCyan, + "yellow": ColorYellow, + "white": ColorWhite, + "default": ColorDefault, + "green": ColorGreen, + "magenta": ColorMagenta, +} + +var attrMap = map[string]Attribute{ + "bold": AttrBold, + "underline": AttrUnderline, + "reverse": AttrReverse, +} + +func rmSpc(s string) string { + reg := regexp.MustCompile(`\s+`) + return reg.ReplaceAllString(s, "") +} + +// readAttr translates strings like `fg-red,fg-bold,bg-white` to fg and bg Attribute +func (mtb MarkdownTxBuilder) readAttr(s string) (Attribute, Attribute) { + fg := mtb.baseFg + bg := mtb.baseBg + + updateAttr := func(a Attribute, attrs []string) Attribute { + for _, s := range attrs { + // replace the color + if c, ok := colorMap[s]; ok { + a &= 0xFF00 // erase clr 0 ~ 8 bits + a |= c // set clr + } + // add attrs + if c, ok := attrMap[s]; ok { + a |= c + } + } + return a + } + + ss := strings.Split(s, ",") + fgs := []string{} + bgs := []string{} + for _, v := range ss { + subs := strings.Split(v, "-") + if len(subs) > 1 { + if subs[0] == "fg" { + fgs = append(fgs, subs[1]) + } + if subs[0] == "bg" { + bgs = append(bgs, subs[1]) + } + } + } + + fg = updateAttr(fg, fgs) + bg = updateAttr(bg, bgs) + return fg, bg +} + +func (mtb *MarkdownTxBuilder) reset() { + mtb.plainTx = []rune{} + mtb.markers = []marker{} +} + +// parse streams and parses text into normalized text and render sequence. +func (mtb *MarkdownTxBuilder) parse(str string) { + rs := str2runes(str) + normTx := []rune{} + square := []rune{} + brackt := []rune{} + accSquare := false + accBrackt := false + cntSquare := 0 + + reset := func() { + square = []rune{} + brackt = []rune{} + accSquare = false + accBrackt = false + cntSquare = 0 + } + // pipe stacks into normTx and clear + rollback := func() { + normTx = append(normTx, square...) + normTx = append(normTx, brackt...) + reset() + } + // chop first and last + chop := func(s []rune) []rune { + return s[1 : len(s)-1] + } + + for i, r := range rs { + switch { + // stacking brackt + case accBrackt: + brackt = append(brackt, r) + if ')' == r { + fg, bg := mtb.readAttr(string(chop(brackt))) + st := len(normTx) + ed := len(normTx) + len(square) - 2 + mtb.markers = append(mtb.markers, marker{st, ed, fg, bg}) + normTx = append(normTx, chop(square)...) 
+ reset() + } else if i+1 == len(rs) { + rollback() + } + // stacking square + case accSquare: + switch { + // squares closed and followed by a '(' + case cntSquare == 0 && '(' == r: + accBrackt = true + brackt = append(brackt, '(') + // squares closed but not followed by a '(' + case cntSquare == 0: + rollback() + if '[' == r { + accSquare = true + cntSquare = 1 + brackt = append(brackt, '[') + } else { + normTx = append(normTx, r) + } + // hit the end + case i+1 == len(rs): + square = append(square, r) + rollback() + case '[' == r: + cntSquare++ + square = append(square, '[') + case ']' == r: + cntSquare-- + square = append(square, ']') + // normal char + default: + square = append(square, r) + } + // stacking normTx + default: + if '[' == r { + accSquare = true + cntSquare = 1 + square = append(square, '[') + } else { + normTx = append(normTx, r) + } + } + } + + mtb.plainTx = normTx +} + +func wrapTx(cs []Cell, wl int) []Cell { + tmpCell := make([]Cell, len(cs)) + copy(tmpCell, cs) + + // get the plaintext + plain := CellsToStr(cs) + + // wrap + plainWrapped := wordwrap.WrapString(plain, uint(wl)) + + // find differences and insert + finalCell := tmpCell // finalcell will get the inserts and is what is returned + + plainRune := []rune(plain) + plainWrappedRune := []rune(plainWrapped) + trigger := "go" + plainRuneNew := plainRune + + for trigger != "stop" { + plainRune = plainRuneNew + for i := range plainRune { + if plainRune[i] == plainWrappedRune[i] { + trigger = "stop" + } else if plainRune[i] != plainWrappedRune[i] && plainWrappedRune[i] == 10 { + trigger = "go" + cell := Cell{10, 0, 0} + j := i - 0 + + // insert a cell into the []Cell in correct position + tmpCell[i] = cell + + // insert the newline into plain so we avoid indexing errors + plainRuneNew = append(plainRune, 10) + copy(plainRuneNew[j+1:], plainRuneNew[j:]) + plainRuneNew[j] = plainWrappedRune[j] + + // restart the inner for loop until plain and plain wrapped are + // the same; yeah, it's inefficient, but the text amounts + // should be small + break + + } else if plainRune[i] != plainWrappedRune[i] && + plainWrappedRune[i-1] == 10 && // if the prior rune is a newline + plainRune[i] == 32 { // and this rune is a space + trigger = "go" + // need to delete plainRune[i] because it gets rid of an extra + // space + plainRuneNew = append(plainRune[:i], plainRune[i+1:]...) + break + + } else { + trigger = "stop" // stops the outer for loop + } + } + } + + finalCell = tmpCell + + return finalCell +} + +// Build implements TextBuilder interface. +func (mtb MarkdownTxBuilder) Build(s string, fg, bg Attribute) []Cell { + mtb.baseFg = fg + mtb.baseBg = bg + mtb.reset() + mtb.parse(s) + cs := make([]Cell, len(mtb.plainTx)) + for i := range cs { + cs[i] = Cell{Ch: mtb.plainTx[i], Fg: fg, Bg: bg} + } + for _, mrk := range mtb.markers { + for i := mrk.st; i < mrk.ed; i++ { + cs[i].Fg = mrk.fg + cs[i].Bg = mrk.bg + } + } + + return cs +} + +// NewMarkdownTxBuilder returns a TextBuilder employing markdown syntax. +func NewMarkdownTxBuilder() TextBuilder { + return MarkdownTxBuilder{} +} diff --git a/vendor/github.com/airking05/termui/theme.go b/vendor/github.com/airking05/termui/theme.go new file mode 100644 index 0000000..21fb3bf --- /dev/null +++ b/vendor/github.com/airking05/termui/theme.go @@ -0,0 +1,140 @@ +// Copyright 2017 Zack Guo . All rights reserved. +// Use of this source code is governed by a MIT license that can +// be found in the LICENSE file. 
+ +package termui + +import "strings" + +/* +// A ColorScheme represents the current look-and-feel of the dashboard. +type ColorScheme struct { + BodyBg Attribute + BlockBg Attribute + HasBorder bool + BorderFg Attribute + BorderBg Attribute + BorderLabelTextFg Attribute + BorderLabelTextBg Attribute + ParTextFg Attribute + ParTextBg Attribute + SparklineLine Attribute + SparklineTitle Attribute + GaugeBar Attribute + GaugePercent Attribute + LineChartLine Attribute + LineChartAxes Attribute + ListItemFg Attribute + ListItemBg Attribute + BarChartBar Attribute + BarChartText Attribute + BarChartNum Attribute + MBarChartBar Attribute + MBarChartText Attribute + MBarChartNum Attribute + TabActiveBg Attribute +} + +// default color scheme depends on the user's terminal setting. +var themeDefault = ColorScheme{HasBorder: true} + +var themeHelloWorld = ColorScheme{ + BodyBg: ColorBlack, + BlockBg: ColorBlack, + HasBorder: true, + BorderFg: ColorWhite, + BorderBg: ColorBlack, + BorderLabelTextBg: ColorBlack, + BorderLabelTextFg: ColorGreen, + ParTextBg: ColorBlack, + ParTextFg: ColorWhite, + SparklineLine: ColorMagenta, + SparklineTitle: ColorWhite, + GaugeBar: ColorRed, + GaugePercent: ColorWhite, + LineChartLine: ColorYellow | AttrBold, + LineChartAxes: ColorWhite, + ListItemBg: ColorBlack, + ListItemFg: ColorYellow, + BarChartBar: ColorRed, + BarChartNum: ColorWhite, + BarChartText: ColorCyan, + MBarChartBar: ColorRed, + MBarChartNum: ColorWhite, + MBarChartText: ColorCyan, + TabActiveBg: ColorMagenta, +} + +var theme = themeDefault // global dep + +// Theme returns the currently used theme. +func Theme() ColorScheme { + return theme +} + +// SetTheme sets a new, custom theme. +func SetTheme(newTheme ColorScheme) { + theme = newTheme +} + +// UseTheme sets a predefined scheme. Currently available: "hello-world" and +// "black-and-white". +func UseTheme(th string) { + switch th { + case "helloworld": + theme = themeHelloWorld + default: + theme = themeDefault + } +} +*/ + +var ColorMap = map[string]Attribute{ + "fg": ColorWhite, + "bg": ColorDefault, + "border.fg": ColorWhite, + "label.fg": ColorGreen, + "par.fg": ColorYellow, + "par.label.bg": ColorWhite, +} + +func ThemeAttr(name string) Attribute { + return lookUpAttr(ColorMap, name) +} + +func lookUpAttr(clrmap map[string]Attribute, name string) Attribute { + + a, ok := clrmap[name] + if ok { + return a + } + + ns := strings.Split(name, ".") + for i := range ns { + nn := strings.Join(ns[i:len(ns)], ".") + a, ok = ColorMap[nn] + if ok { + break + } + } + + return a +} + +// 0<=r,g,b <= 5 +func ColorRGB(r, g, b int) Attribute { + within := func(n int) int { + if n < 0 { + return 0 + } + + if n > 5 { + return 5 + } + + return n + } + + r, b, g = within(r), within(b), within(g) + return Attribute(0x0f + 36*r + 6*g + b) +} diff --git a/vendor/github.com/airking05/termui/widget.go b/vendor/github.com/airking05/termui/widget.go new file mode 100644 index 0000000..80276bf --- /dev/null +++ b/vendor/github.com/airking05/termui/widget.go @@ -0,0 +1,94 @@ +// Copyright 2017 Zack Guo . All rights reserved. +// Use of this source code is governed by a MIT license that can +// be found in the LICENSE file. 
+ +package termui + +import ( + "fmt" + "sync" +) + +// event mixins +type WgtMgr map[string]WgtInfo + +type WgtInfo struct { + Handlers map[string]func(Event) + WgtRef Widget + Id string +} + +type Widget interface { + Id() string +} + +func NewWgtInfo(wgt Widget) WgtInfo { + return WgtInfo{ + Handlers: make(map[string]func(Event)), + WgtRef: wgt, + Id: wgt.Id(), + } +} + +func NewWgtMgr() WgtMgr { + wm := WgtMgr(make(map[string]WgtInfo)) + return wm + +} + +func (wm WgtMgr) AddWgt(wgt Widget) { + wm[wgt.Id()] = NewWgtInfo(wgt) +} + +func (wm WgtMgr) RmWgt(wgt Widget) { + wm.RmWgtById(wgt.Id()) +} + +func (wm WgtMgr) RmWgtById(id string) { + delete(wm, id) +} + +func (wm WgtMgr) AddWgtHandler(id, path string, h func(Event)) { + if w, ok := wm[id]; ok { + w.Handlers[path] = h + } +} + +func (wm WgtMgr) RmWgtHandler(id, path string) { + if w, ok := wm[id]; ok { + delete(w.Handlers, path) + } +} + +var counter struct { + sync.RWMutex + count int +} + +func GenId() string { + counter.Lock() + defer counter.Unlock() + + counter.count += 1 + return fmt.Sprintf("%d", counter.count) +} + +func (wm WgtMgr) WgtHandlersHook() func(Event) { + return func(e Event) { + for _, v := range wm { + if k := findMatch(v.Handlers, e.Path); k != "" { + v.Handlers[k](e) + } + } + } +} + +var DefaultWgtMgr WgtMgr + +func (b *Block) Handle(path string, handler func(Event)) { + if _, ok := DefaultWgtMgr[b.Id()]; !ok { + DefaultWgtMgr.AddWgt(b) + } + + DefaultWgtMgr.AddWgtHandler(b.Id(), path, handler) +} diff --git a/vendor/github.com/bmatcuk/doublestar/.gitignore b/vendor/github.com/bmatcuk/doublestar/.gitignore new file mode 100644 index 0000000..76d92ba --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/.gitignore @@ -0,0 +1,29 @@ +# vi +*~ +*.swp +*.swo + +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/bmatcuk/doublestar/.travis.yml b/vendor/github.com/bmatcuk/doublestar/.travis.yml new file mode 100644 index 0000000..cf3c884 --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/.travis.yml @@ -0,0 +1,17 @@ +language: go + +go: + - 1.3 + - 1.4 + - 1.5 + - 1.6 + +before_install: + - go get -t -v ./... + +script: + - go test -race -coverprofile=coverage.txt -covermode=atomic + +after_success: + - bash <(curl -s https://codecov.io/bash) + diff --git a/vendor/github.com/bmatcuk/doublestar/LICENSE b/vendor/github.com/bmatcuk/doublestar/LICENSE new file mode 100644 index 0000000..309c9d1 --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2014 Bob Matcuk + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/bmatcuk/doublestar/README.md b/vendor/github.com/bmatcuk/doublestar/README.md new file mode 100644 index 0000000..8e365c5 --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/README.md @@ -0,0 +1,109 @@ +![Release](https://img.shields.io/github/release/bmatcuk/doublestar.svg?branch=master) +[![Build Status](https://travis-ci.org/bmatcuk/doublestar.svg?branch=master)](https://travis-ci.org/bmatcuk/doublestar) +[![codecov.io](https://img.shields.io/codecov/c/github/bmatcuk/doublestar.svg?branch=master)](https://codecov.io/github/bmatcuk/doublestar?branch=master) + +# doublestar + +**doublestar** is a [golang](http://golang.org/) implementation of path pattern +matching and globbing with support for "doublestar" (aka globstar: `**`) +patterns. + +doublestar patterns match files and directories recursively. For example, if +you had the following directory structure: + +``` +grandparent +`-- parent + |-- child1 + `-- child2 +``` + +You could find the children with patterns such as: `**/child*`, +`grandparent/**/child?`, `**/parent/*`, or even just `**` by itself (which will +return all files and directories recursively). + +Bash's globstar is doublestar's inspiration and, as such, works similarly. +Note that the doublestar must appear as a path component by itself. A pattern +such as `/path**` is invalid and will be treated the same as `/path*`, but +`/path*/**` should achieve the desired result. Additionally, `/path/**` will +match all directories and files under the path directory, but `/path/**/` will +only match directories. + +## Installation + +**doublestar** can be installed via `go get`: + +```bash +go get github.com/bmatcuk/doublestar +``` + +To use it in your code, you must import it: + +```go +import "github.com/bmatcuk/doublestar" +``` + +## Functions + +### Match +```go +func Match(pattern, name string) (bool, error) +``` + +Match returns true if `name` matches the file name `pattern` +([see below](#patterns)). `name` and `pattern` are split on forward slash (`/`) +characters and may be relative or absolute. + +Note: `Match()` is meant to be a drop-in replacement for `path.Match()`. As +such, it always uses `/` as the path separator. If you are writing code that +will run on systems where `/` is not the path separator (such as Windows), you +want to use `PathMatch()` (below) instead. + + +### PathMatch +```go +func PathMatch(pattern, name string) (bool, error) +``` + +PathMatch returns true if `name` matches the file name `pattern` +([see below](#patterns)). The difference between Match and PathMatch is that +PathMatch will automatically use your system's path separator to split `name` +and `pattern`. + +`PathMatch()` is meant to be a drop-in replacement for `filepath.Match()`. + +### Glob +```go +func Glob(pattern string) ([]string, error) +``` + +Glob finds all files and directories in the filesystem that match `pattern` +([see below](#patterns)). `pattern` may be relative (to the current working +directory), or absolute. 
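
For a quick feel of how these pieces fit together, here is a minimal sketch combining `Match` and `Glob`; the pattern and the `logs` directory are invented for illustration:

```go
package main

import (
	"fmt"
	"log"

	"github.com/bmatcuk/doublestar"
)

func main() {
	// Match compares a pattern against a name; it never touches the filesystem.
	ok, err := doublestar.Match("**/*.{jpg,png}", "assets/img/logo.png")
	if err != nil {
		log.Fatal(err) // the only possible error is ErrBadPattern
	}
	fmt.Println(ok) // true

	// Glob walks the filesystem, relative to the current working directory.
	matches, err := doublestar.Glob("logs/**/*.log")
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range matches {
		fmt.Println(m)
	}
}
```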
+ +`Glob()` is meant to be a drop-in replacement for `filepath.Glob()`. + +## Patterns + +**doublestar** supports the following special terms in the patterns: + +Special Terms | Meaning +------------- | ------- +`*` | matches any sequence of non-path-separators +`**` | matches any sequence of characters, including path separators +`?` | matches any single non-path-separator character +`[class]` | matches any single non-path-separator character against a class of characters ([see below](#character-classes)) +`{alt1,...}` | matches a sequence of characters if one of the comma-separated alternatives matches + +Any character with a special meaning can be escaped with a backslash (`\`). + +### Character Classes + +Character classes support the following: + +Class | Meaning +---------- | ------- +`[abc]` | matches any single character within the set +`[a-z]` | matches any single character in the range +`[^class]` | matches any single character which does *not* match the class + diff --git a/vendor/github.com/bmatcuk/doublestar/doublestar.go b/vendor/github.com/bmatcuk/doublestar/doublestar.go new file mode 100644 index 0000000..ceab4e3 --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/doublestar.go @@ -0,0 +1,455 @@ +package doublestar + +import ( + "fmt" + "os" + "path" + "path/filepath" + "strings" + "unicode/utf8" +) + +var ErrBadPattern = path.ErrBadPattern + +// Split a path on the given separator, respecting escaping. +func splitPathOnSeparator(path string, separator rune) []string { + // if the separator is '\\', then we can just split... + if separator == '\\' { + return strings.Split(path, string(separator)) + } + + // otherwise, we need to be careful of situations where the separator was escaped + cnt := strings.Count(path, string(separator)) + if cnt == 0 { + return []string{path} + } + ret := make([]string, cnt+1) + pathlen := len(path) + separatorLen := utf8.RuneLen(separator) + idx := 0 + for start := 0; start < pathlen; { + end := indexRuneWithEscaping(path[start:], separator) + if end == -1 { + end = pathlen + } else { + end += start + } + ret[idx] = path[start:end] + start = end + separatorLen + idx++ + } + return ret[:idx] +} + +// Find the first index of a rune in a string, +// ignoring any times the rune is escaped using "\". +func indexRuneWithEscaping(s string, r rune) int { + end := strings.IndexRune(s, r) + if end == -1 { + return -1 + } + if end > 0 && s[end-1] == '\\' { + start := end + utf8.RuneLen(r) + end = indexRuneWithEscaping(s[start:], r) + if end != -1 { + end += start + } + } + return end +} + +// Match returns true if name matches the shell file name pattern. +// The pattern syntax is: +// +// pattern: +// { term } +// term: +// '*' matches any sequence of non-path-separators +// '**' matches any sequence of characters, including +// path separators. +// '?' matches any single non-path-separator character +// '[' [ '^' ] { character-range } ']' +// character class (must be non-empty) +// '{' { term } [ ',' { term } ... ] '}' +// c matches character c (c != '*', '?', '\\', '[') +// '\\' c matches character c +// +// character-range: +// c matches character c (c != '\\', '-', ']') +// '\\' c matches character c +// lo '-' hi matches character c for lo <= c <= hi +// +// Match requires pattern to match all of name, not just a substring. +// The path-separator defaults to the '/' character. The only possible +// returned error is ErrBadPattern, when pattern is malformed. 
+// +// Note: this is meant as a drop-in replacement for path.Match() which +// always uses '/' as the path separator. If you want to support systems +// which use a different path separator (such as Windows), what you want +// is the PathMatch() function below. +// +func Match(pattern, name string) (bool, error) { + return matchWithSeparator(pattern, name, '/') +} + +// PathMatch is like Match except that it uses your system's path separator. +// For most systems, this will be '/'. However, for Windows, it would be '\\'. +// Note that for systems where the path separator is '\\', escaping is +// disabled. +// +// Note: this is meant as a drop-in replacement for filepath.Match(). +// +func PathMatch(pattern, name string) (bool, error) { + return matchWithSeparator(pattern, name, os.PathSeparator) +} + +// Match returns true if name matches the shell file name pattern. +// The pattern syntax is: +// +// pattern: +// { term } +// term: +// '*' matches any sequence of non-path-separators +// '**' matches any sequence of characters, including +// path separators. +// '?' matches any single non-path-separator character +// '[' [ '^' ] { character-range } ']' +// character class (must be non-empty) +// '{' { term } [ ',' { term } ... ] '}' +// c matches character c (c != '*', '?', '\\', '[') +// '\\' c matches character c +// +// character-range: +// c matches character c (c != '\\', '-', ']') +// '\\' c matches character c, unless separator is '\\' +// lo '-' hi matches character c for lo <= c <= hi +// +// Match requires pattern to match all of name, not just a substring. +// The only possible returned error is ErrBadPattern, when pattern +// is malformed. +// +func matchWithSeparator(pattern, name string, separator rune) (bool, error) { + patternComponents := splitPathOnSeparator(pattern, separator) + nameComponents := splitPathOnSeparator(name, separator) + return doMatching(patternComponents, nameComponents) +} + +func doMatching(patternComponents, nameComponents []string) (matched bool, err error) { + // check for some base-cases + patternLen, nameLen := len(patternComponents), len(nameComponents) + if patternLen == 0 && nameLen == 0 { + return true, nil + } + if patternLen == 0 || nameLen == 0 { + return false, nil + } + + patIdx, nameIdx := 0, 0 + for patIdx < patternLen && nameIdx < nameLen { + if patternComponents[patIdx] == "**" { + // if our last pattern component is a doublestar, we're done - + // doublestar will match any remaining name components, if any. + if patIdx++; patIdx >= patternLen { + return true, nil + } + + // otherwise, try matching remaining components + for ; nameIdx < nameLen; nameIdx++ { + if m, _ := doMatching(patternComponents[patIdx:], nameComponents[nameIdx:]); m { + return true, nil + } + } + return false, nil + } else { + // try matching components + matched, err = matchComponent(patternComponents[patIdx], nameComponents[nameIdx]) + if !matched || err != nil { + return + } + } + patIdx++ + nameIdx++ + } + return patIdx >= patternLen && nameIdx >= nameLen, nil +} + +// Glob returns the names of all files matching pattern or nil +// if there is no matching file. The syntax of pattern is the same +// as in Match. The pattern may describe hierarchical names such as +// /usr/*/bin/ed (assuming the Separator is '/'). +// +// Glob ignores file system errors such as I/O errors reading directories. +// The only possible returned error is ErrBadPattern, when pattern +// is malformed. +// +// Your system path separator is automatically used. 
This means on +// systems where the separator is '\\' (Windows), escaping will be +// disabled. +// +// Note: this is meant as a drop-in replacement for filepath.Glob(). +// +func Glob(pattern string) (matches []string, err error) { + patternComponents := splitPathOnSeparator(filepath.ToSlash(pattern), '/') + if len(patternComponents) == 0 { + return nil, nil + } + + // On Windows systems, this will return the drive name ('C:'), on others, + // it will return an empty string. + volumeName := filepath.VolumeName(pattern) + + // If the first pattern component is equal to the volume name, then the + // pattern is an absolute path. + if patternComponents[0] == volumeName { + return doGlob(fmt.Sprintf("%s%s", volumeName, string(os.PathSeparator)), patternComponents[1:], matches) + } + + // otherwise, it's a relative pattern + return doGlob(".", patternComponents, matches) +} + +// Perform a glob +func doGlob(basedir string, components, matches []string) (m []string, e error) { + m = matches + e = nil + + // figure out how many components we don't need to glob because they're + // just names without patterns - we'll use os.Lstat below to check if that + // path actually exists + patLen := len(components) + patIdx := 0 + for ; patIdx < patLen; patIdx++ { + if strings.IndexAny(components[patIdx], "*?[{\\") >= 0 { + break + } + } + if patIdx > 0 { + basedir = filepath.Join(basedir, filepath.Join(components[0:patIdx]...)) + } + + // Lstat will return an error if the file/directory doesn't exist + fi, err := os.Lstat(basedir) + if err != nil { + return + } + + // if there are no more components, we've found a match + if patIdx >= patLen { + m = append(m, basedir) + return + } + + // otherwise, we need to check each item in the directory... + // first, if basedir is a symlink, follow it... + if (fi.Mode() & os.ModeSymlink) != 0 { + fi, err = os.Stat(basedir) + if err != nil { + return + } + } + + // confirm it's a directory... 
+ if !fi.IsDir() { + return + } + + // read directory + dir, err := os.Open(basedir) + if err != nil { + return + } + defer dir.Close() + + files, _ := dir.Readdir(-1) + lastComponent := (patIdx + 1) >= patLen + if components[patIdx] == "**" { + // if the current component is a doublestar, we'll try depth-first + for _, file := range files { + // if symlink, we may want to follow + if (file.Mode() & os.ModeSymlink) != 0 { + file, err = os.Stat(filepath.Join(basedir, file.Name())) + if err != nil { + continue + } + } + + if file.IsDir() { + // recurse into directories + if lastComponent { + m = append(m, filepath.Join(basedir, file.Name())) + } + m, e = doGlob(filepath.Join(basedir, file.Name()), components[patIdx:], m) + } else if lastComponent { + // if the pattern's last component is a doublestar, we match filenames, too + m = append(m, filepath.Join(basedir, file.Name())) + } + } + if lastComponent { + return // we're done + } + patIdx++ + lastComponent = (patIdx + 1) >= patLen + } + + // check items in current directory and recurse + var match bool + for _, file := range files { + match, e = matchComponent(components[patIdx], file.Name()) + if e != nil { + return + } + if match { + if lastComponent { + m = append(m, filepath.Join(basedir, file.Name())) + } else { + m, e = doGlob(filepath.Join(basedir, file.Name()), components[patIdx+1:], m) + } + } + } + return +} + +// Attempt to match a single pattern component with a path component +func matchComponent(pattern, name string) (bool, error) { + // check some base cases + patternLen, nameLen := len(pattern), len(name) + if patternLen == 0 && nameLen == 0 { + return true, nil + } + if patternLen == 0 { + return false, nil + } + if nameLen == 0 && pattern != "*" { + return false, nil + } + + // check for matches one rune at a time + patIdx, nameIdx := 0, 0 + for patIdx < patternLen && nameIdx < nameLen { + patRune, patAdj := utf8.DecodeRuneInString(pattern[patIdx:]) + nameRune, nameAdj := utf8.DecodeRuneInString(name[nameIdx:]) + if patRune == '\\' { + // handle escaped runes + patIdx += patAdj + patRune, patAdj = utf8.DecodeRuneInString(pattern[patIdx:]) + if patRune == utf8.RuneError { + return false, ErrBadPattern + } else if patRune == nameRune { + patIdx += patAdj + nameIdx += nameAdj + } else { + return false, nil + } + } else if patRune == '*' { + // handle stars + if patIdx += patAdj; patIdx >= patternLen { + // a star at the end of a pattern will always + // match the rest of the path + return true, nil + } + + // check if we can make any matches + for ; nameIdx < nameLen; nameIdx += nameAdj { + if m, _ := matchComponent(pattern[patIdx:], name[nameIdx:]); m { + return true, nil + } + } + return false, nil + } else if patRune == '[' { + // handle character sets + patIdx += patAdj + endClass := indexRuneWithEscaping(pattern[patIdx:], ']') + if endClass == -1 { + return false, ErrBadPattern + } + endClass += patIdx + classRunes := []rune(pattern[patIdx:endClass]) + classRunesLen := len(classRunes) + if classRunesLen > 0 { + classIdx := 0 + matchClass := false + if classRunes[0] == '^' { + classIdx++ + } + for classIdx < classRunesLen { + low := classRunes[classIdx] + if low == '-' { + return false, ErrBadPattern + } + classIdx++ + if low == '\\' { + if classIdx < classRunesLen { + low = classRunes[classIdx] + classIdx++ + } else { + return false, ErrBadPattern + } + } + high := low + if classIdx < classRunesLen && classRunes[classIdx] == '-' { + // we have a range of runes + if classIdx++; classIdx >= classRunesLen { + return false, 
ErrBadPattern + } + high = classRunes[classIdx] + if high == '-' { + return false, ErrBadPattern + } + classIdx++ + if high == '\\' { + if classIdx < classRunesLen { + high = classRunes[classIdx] + classIdx++ + } else { + return false, ErrBadPattern + } + } + } + if low <= nameRune && nameRune <= high { + matchClass = true + } + } + if matchClass == (classRunes[0] == '^') { + return false, nil + } + } else { + return false, ErrBadPattern + } + patIdx = endClass + 1 + nameIdx += nameAdj + } else if patRune == '{' { + // handle alternatives such as {alt1,alt2,...} + patIdx += patAdj + endOptions := indexRuneWithEscaping(pattern[patIdx:], '}') + if endOptions == -1 { + return false, ErrBadPattern + } + endOptions += patIdx + options := splitPathOnSeparator(pattern[patIdx:endOptions], ',') + patIdx = endOptions + 1 + for _, o := range options { + m, e := matchComponent(o+pattern[patIdx:], name[nameIdx:]) + if e != nil { + return false, e + } + if m { + return true, nil + } + } + return false, nil + } else if patRune == '?' || patRune == nameRune { + // handle single-rune wildcard + patIdx += patAdj + nameIdx += nameAdj + } else { + return false, nil + } + } + if patIdx >= patternLen && nameIdx >= nameLen { + return true, nil + } + if nameIdx >= nameLen && pattern[patIdx:] == "*" || pattern[patIdx:] == "**" { + return true, nil + } + return false, nil +} diff --git a/vendor/github.com/bmatcuk/doublestar/go.mod b/vendor/github.com/bmatcuk/doublestar/go.mod new file mode 100644 index 0000000..1d0378b --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/go.mod @@ -0,0 +1 @@ +module github.com/bmatcuk/doublestar diff --git a/vendor/github.com/karrick/godirwalk/.gitignore b/vendor/github.com/karrick/godirwalk/.gitignore new file mode 100644 index 0000000..a1338d6 --- /dev/null +++ b/vendor/github.com/karrick/godirwalk/.gitignore @@ -0,0 +1,14 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ diff --git a/vendor/github.com/karrick/godirwalk/LICENSE b/vendor/github.com/karrick/godirwalk/LICENSE new file mode 100644 index 0000000..01ce194 --- /dev/null +++ b/vendor/github.com/karrick/godirwalk/LICENSE @@ -0,0 +1,25 @@ +BSD 2-Clause License + +Copyright (c) 2017, Karrick McDermott +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/karrick/godirwalk/README.md b/vendor/github.com/karrick/godirwalk/README.md new file mode 100644 index 0000000..4f9922f --- /dev/null +++ b/vendor/github.com/karrick/godirwalk/README.md @@ -0,0 +1,208 @@ +# godirwalk + +`godirwalk` is a library for traversing a directory tree on a file +system. + +In short, why do I use this library? + +1. It's faster than `filepath.Walk`. +1. It's more correct on Windows than `filepath.Walk`. +1. It's more easy to use than `filepath.Walk`. +1. It's more flexible than `filepath.Walk`. + +## Usage Example + +Additional examples are provided in the `examples/` subdirectory. + +This library will normalize the provided top level directory name +based on the os-specific path separator by calling `filepath.Clean` on +its first argument. However it always provides the pathname created by +using the correct os-specific path separator when invoking the +provided callback function. + +```Go + dirname := "some/directory/root" + err := godirwalk.Walk(dirname, &godirwalk.Options{ + Callback: func(osPathname string, de *godirwalk.Dirent) error { + fmt.Printf("%s %s\n", de.ModeType(), osPathname) + return nil + }, + Unsorted: true, // (optional) set true for faster yet non-deterministic enumeration (see godoc) + }) +``` + +This library not only provides functions for traversing a file system +directory tree, but also for obtaining a list of immediate descendants +of a particular directory, typically much more quickly than using +`os.ReadDir` or `os.ReadDirnames`. + +Documentation is available via +[![GoDoc](https://godoc.org/github.com/karrick/godirwalk?status.svg)](https://godoc.org/github.com/karrick/godirwalk). + +## Description + +Here's why I use `godirwalk` in preference to `filepath.Walk`, +`os.ReadDir`, and `os.ReadDirnames`. + +### It's faster than `filepath.Walk` + +When compared against `filepath.Walk` in benchmarks, it has been +observed to run between five and ten times the speed on darwin, at +speeds comparable to the that of the unix `find` utility; about twice +the speed on linux; and about four times the speed on Windows. + +How does it obtain this performance boost? It does less work to give +you nearly the same output. This library calls the same `syscall` +functions to do the work, but it makes fewer calls, does not throw +away information that it might need, and creates less memory churn +along the way by reusing the same scratch buffer rather than +reallocating a new buffer every time it reads data from the operating +system. + +While traversing a file system directory tree, `filepath.Walk` obtains +the list of immediate descendants of a directory, and throws away the +file system node type information provided by the operating system +that comes with the node's name. Then, immediately prior to invoking +the callback function, `filepath.Walk` invokes `os.Stat` for each +node, and passes the returned `os.FileInfo` information to the +callback. 
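
To make the contrast concrete, here is a small sketch of the kind of callback this design enables: decisions come from the mode type that arrives with each directory entry, and `os.Stat` is invoked only for the files whose full `os.FileInfo` is actually needed (the root path and the `.git` skip are arbitrary choices for illustration):

```Go
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"

	"github.com/karrick/godirwalk"
)

func main() {
	err := godirwalk.Walk("some/directory/root", &godirwalk.Options{
		Callback: func(osPathname string, de *godirwalk.Dirent) error {
			// The mode type is already known; no extra system call needed here.
			if de.IsDir() && de.Name() == ".git" {
				return filepath.SkipDir // skip version-control metadata
			}
			// Pay for os.Stat only when the full FileInfo is really wanted.
			if de.IsRegular() {
				fi, err := os.Stat(osPathname)
				if err != nil {
					return err
				}
				fmt.Printf("%10d %s\n", fi.Size(), osPathname)
			}
			return nil
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```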
+ +While the `os.FileInfo` information provided by `os.Stat` is extremely +helpful--and even includes the `os.FileMode` data--providing it +requires an additional system call for each node. + +Because most callbacks only care about what the node type is, this +library does not throw the type information away, but rather provides +that information to the callback function in the form of a +`os.FileMode` value. Note that the provided `os.FileMode` value that +this library provides only has the node type information, and does not +have the permission bits, sticky bits, or other information from the +file's mode. If the callback does care about a particular node's +entire `os.FileInfo` data structure, the callback can easiy invoke +`os.Stat` when needed, and only when needed. + +#### Benchmarks + +##### macOS + +```Bash +go test -bench=. +goos: darwin +goarch: amd64 +pkg: github.com/karrick/godirwalk +BenchmarkFilepathWalk-8 1 3001274570 ns/op +BenchmarkGoDirWalk-8 3 465573172 ns/op +BenchmarkFlameGraphFilepathWalk-8 1 6957916936 ns/op +BenchmarkFlameGraphGoDirWalk-8 1 4210582571 ns/op +PASS +ok github.com/karrick/godirwalk 16.822s +``` + +##### Linux + +```Bash +go test -bench=. +goos: linux +goarch: amd64 +pkg: github.com/karrick/godirwalk +BenchmarkFilepathWalk-12 1 1609189170 ns/op +BenchmarkGoDirWalk-12 5 211336628 ns/op +BenchmarkFlameGraphFilepathWalk-12 1 3968119932 ns/op +BenchmarkFlameGraphGoDirWalk-12 1 2139598998 ns/op +PASS +ok github.com/karrick/godirwalk 9.007s +``` + +### It's more correct on Windows than `filepath.Walk` + +I did not previously care about this either, but humor me. We all love +how we can write once and run everywhere. It is essential for the +language's adoption, growth, and success, that the software we create +can run unmodified on all architectures and operating systems +supported by Go. + +When the traversed file system has a logical loop caused by symbolic +links to directories, on unix `filepath.Walk` ignores symbolic links +and traverses the entire directory tree without error. On Windows +however, `filepath.Walk` will continue following directory symbolic +links, even though it is not supposed to, eventually causing +`filepath.Walk` to terminate early and return an error when the +pathname gets too long from concatenating endless loops of symbolic +links onto the pathname. This error comes from Windows, passes through +`filepath.Walk`, and to the upstream client running `filepath.Walk`. + +The takeaway is that behavior is different based on which platform +`filepath.Walk` is running. While this is clearly not intentional, +until it is fixed in the standard library, it presents a compatibility +problem. + +This library correctly identifies symbolic links that point to +directories and will only follow them when `FollowSymbolicLinks` is +set to true. Behavior on Windows and other operating systems is +identical. + +### It's more easy to use than `filepath.Walk` + +Since this library does not invoke `os.Stat` on every file system node +it encounters, there is no possible error event for the callback +function to filter on. The third argument in the `filepath.WalkFunc` +function signature to pass the error from `os.Stat` to the callback +function is no longer necessary, and thus eliminated from signature of +the callback function from this library. + +Also, `filepath.Walk` invokes the callback function with a solidus +delimited pathname regardless of the os-specific path separator. 
This +library invokes the callback function with the os-specific pathname +separator, obviating a call to `filepath.Clean` in the callback +function for each node prior to actually using the provided pathname. + +In other words, even on Windows, `filepath.Walk` will invoke the +callback with `some/path/to/foo.txt`, requiring well written clients +to perform pathname normalization for every file prior to working with +the specified file. In truth, many clients developed on unix and not +tested on Windows neglect this subtlety, and will result in software +bugs when running on Windows. This library would invoke the callback +function with `some\path\to\foo.txt` for the same file when running on +Windows, eliminating the need to normalize the pathname by the client, +and lessen the likelyhood that a client will work on unix but not on +Windows. + +### It's more flexible than `filepath.Walk` + +#### Configurable Handling of Symbolic Links + +The default behavior of this library is to ignore symbolic links to +directories when walking a directory tree, just like `filepath.Walk` +does. However, it does invoke the callback function with each node it +finds, including symbolic links. If a particular use case exists to +follow symbolic links when traversing a directory tree, this library +can be invoked in manner to do so, by setting the +`FollowSymbolicLinks` parameter to true. + +#### Configurable Sorting of Directory Children + +The default behavior of this library is to always sort the immediate +descendants of a directory prior to visiting each node, just like +`filepath.Walk` does. This is usually the desired behavior. However, +this does come at a performance penalty to sort the names when a +directory node has many entries. If a particular use case exists that +does not require sorting the directory's immediate descendants prior +to visiting its nodes, this library will skip the sorting step when +the `Unsorted` parameter is set to true. + +#### Configurable Post Children Callback + +This library provides upstream code with the ability to specify a +callback to be invoked for each directory after its children are +processed. This has been used to recursively delete empty directories +after traversing the file system in a more efficient manner. See the +`examples/clean-empties` directory for an example of this usage. + +#### Configurable Error Callback + +This library provides upstream code with the ability to specify a +callback to be invoked for errors that the operating system returns, +allowing the upstream code to determine the next course of action to +take, whether to halt walking the hierarchy, as it would do were no +error callback provided, or skip the node that caused the error. See +the `examples/walk-fast` directory for an example of this usage. diff --git a/vendor/github.com/karrick/godirwalk/dirent.go b/vendor/github.com/karrick/godirwalk/dirent.go new file mode 100644 index 0000000..5a27722 --- /dev/null +++ b/vendor/github.com/karrick/godirwalk/dirent.go @@ -0,0 +1,74 @@ +package godirwalk + +import ( + "os" + "path/filepath" + + "github.com/pkg/errors" +) + +// Dirent stores the name and file system mode type of discovered file system +// entries. +type Dirent struct { + name string + modeType os.FileMode +} + +// NewDirent returns a newly initialized Dirent structure, or an error. This +// function does not follow symbolic links. +// +// This function is rarely used, as Dirent structures are provided by other +// functions in this library that read and walk directories. 
+func NewDirent(osPathname string) (*Dirent, error) { + fi, err := os.Lstat(osPathname) + if err != nil { + return nil, errors.Wrap(err, "cannot lstat") + } + return &Dirent{ + name: filepath.Base(osPathname), + modeType: fi.Mode() & os.ModeType, + }, nil +} + +// Name returns the basename of the file system entry. +func (de Dirent) Name() string { return de.name } + +// ModeType returns the mode bits that specify the file system node type. We +// could make our own enum-like data type for encoding the file type, but Go's +// runtime already gives us architecture independent file modes, as discussed in +// `os/types.go`: +// +// Go's runtime FileMode type has same definition on all systems, so that +// information about files can be moved from one system to another portably. +func (de Dirent) ModeType() os.FileMode { return de.modeType } + +// IsDir returns true if and only if the Dirent represents a file system +// directory. Note that on some operating systems, more than one file mode bit +// may be set for a node. For instance, on Windows, a symbolic link that points +// to a directory will have both the directory and the symbolic link bits set. +func (de Dirent) IsDir() bool { return de.modeType&os.ModeDir != 0 } + +// IsRegular returns true if and only if the Dirent represents a regular +// file. That is, it ensures that no mode type bits are set. +func (de Dirent) IsRegular() bool { return de.modeType&os.ModeType == 0 } + +// IsSymlink returns true if and only if the Dirent represents a file system +// symbolic link. Note that on some operating systems, more than one file mode +// bit may be set for a node. For instance, on Windows, a symbolic link that +// points to a directory will have both the directory and the symbolic link bits +// set. +func (de Dirent) IsSymlink() bool { return de.modeType&os.ModeSymlink != 0 } + +// Dirents represents a slice of Dirent pointers, which are sortable by +// name. This type satisfies the `sort.Interface` interface. +type Dirents []*Dirent + +// Len returns the count of Dirent structures in the slice. +func (l Dirents) Len() int { return len(l) } + +// Less returns true if and only if the Name of the element specified by the +// first index is lexicographically less than that of the second index. +func (l Dirents) Less(i, j int) bool { return l[i].name < l[j].name } + +// Swap exchanges the two Dirent entries specified by the two provided indexes. +func (l Dirents) Swap(i, j int) { l[i], l[j] = l[j], l[i] } diff --git a/vendor/github.com/karrick/godirwalk/doc.go b/vendor/github.com/karrick/godirwalk/doc.go new file mode 100644 index 0000000..0dfdabd --- /dev/null +++ b/vendor/github.com/karrick/godirwalk/doc.go @@ -0,0 +1,34 @@ +/* +Package godirwalk provides functions to read and traverse directory trees. + +In short, why do I use this library? + +* It's faster than `filepath.Walk`. + +* It's more correct on Windows than `filepath.Walk`. + +* It's more easy to use than `filepath.Walk`. + +* It's more flexible than `filepath.Walk`. + +USAGE + +This library will normalize the provided top level directory name based on the +os-specific path separator by calling `filepath.Clean` on its first +argument. However it always provides the pathname created by using the correct +os-specific path separator when invoking the provided callback function. 
+ + dirname := "some/directory/root" + err := godirwalk.Walk(dirname, &godirwalk.Options{ + Callback: func(osPathname string, de *godirwalk.Dirent) error { + fmt.Printf("%s %s\n", de.ModeType(), osPathname) + return nil + }, + }) + +This library not only provides functions for traversing a file system directory +tree, but also for obtaining a list of immediate descendants of a particular +directory, typically much more quickly than using `os.ReadDir` or +`os.ReadDirnames`. +*/ +package godirwalk diff --git a/vendor/github.com/karrick/godirwalk/go.mod b/vendor/github.com/karrick/godirwalk/go.mod new file mode 100644 index 0000000..6b467a9 --- /dev/null +++ b/vendor/github.com/karrick/godirwalk/go.mod @@ -0,0 +1,3 @@ +module github.com/karrick/godirwalk + +require github.com/pkg/errors v0.8.0 diff --git a/vendor/github.com/karrick/godirwalk/go.sum b/vendor/github.com/karrick/godirwalk/go.sum new file mode 100644 index 0000000..3dfe462 --- /dev/null +++ b/vendor/github.com/karrick/godirwalk/go.sum @@ -0,0 +1,2 @@ +github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= diff --git a/vendor/github.com/karrick/godirwalk/readdir.go b/vendor/github.com/karrick/godirwalk/readdir.go new file mode 100644 index 0000000..2bba689 --- /dev/null +++ b/vendor/github.com/karrick/godirwalk/readdir.go @@ -0,0 +1,47 @@ +package godirwalk + +// ReadDirents returns a sortable slice of pointers to Dirent structures, each +// representing the file system name and mode type for one of the immediate +// descendant of the specified directory. If the specified directory is a +// symbolic link, it will be resolved. +// +// If an optional scratch buffer is provided that is at least one page of +// memory, it will be used when reading directory entries from the file system. +// +// children, err := godirwalk.ReadDirents(osDirname, nil) +// if err != nil { +// return nil, errors.Wrap(err, "cannot get list of directory children") +// } +// sort.Sort(children) +// for _, child := range children { +// fmt.Printf("%s %s\n", child.ModeType, child.Name) +// } +func ReadDirents(osDirname string, scratchBuffer []byte) (Dirents, error) { + return readdirents(osDirname, scratchBuffer) +} + +// ReadDirnames returns a slice of strings, representing the immediate +// descendants of the specified directory. If the specified directory is a +// symbolic link, it will be resolved. +// +// If an optional scratch buffer is provided that is at least one page of +// memory, it will be used when reading directory entries from the file system. +// +// Note that this function, depending on operating system, may or may not invoke +// the ReadDirents function, in order to prepare the list of immediate +// descendants. Therefore, if your program needs both the names and the file +// system mode types of descendants, it will always be faster to invoke +// ReadDirents directly, rather than calling this function, then looping over +// the results and calling os.Stat for each child. 
+// +// children, err := godirwalk.ReadDirnames(osDirname, nil) +// if err != nil { +// return nil, errors.Wrap(err, "cannot get list of directory children") +// } +// sort.Strings(children) +// for _, child := range children { +// fmt.Printf("%s\n", child) +// } +func ReadDirnames(osDirname string, scratchBuffer []byte) ([]string, error) { + return readdirnames(osDirname, scratchBuffer) +} diff --git a/vendor/github.com/karrick/godirwalk/readdir_unix.go b/vendor/github.com/karrick/godirwalk/readdir_unix.go new file mode 100644 index 0000000..04a628f --- /dev/null +++ b/vendor/github.com/karrick/godirwalk/readdir_unix.go @@ -0,0 +1,109 @@ +// +build darwin freebsd linux netbsd openbsd + +package godirwalk + +import ( + "os" + "path/filepath" + "syscall" + "unsafe" + + "github.com/pkg/errors" +) + +func readdirents(osDirname string, scratchBuffer []byte) (Dirents, error) { + dh, err := os.Open(osDirname) + if err != nil { + return nil, errors.Wrap(err, "cannot Open") + } + + var entries Dirents + + fd := int(dh.Fd()) + + if len(scratchBuffer) < MinimumScratchBufferSize { + scratchBuffer = make([]byte, DefaultScratchBufferSize) + } + + var de *syscall.Dirent + + for { + n, err := syscall.ReadDirent(fd, scratchBuffer) + if err != nil { + _ = dh.Close() // ignore potential error returned by Close + return nil, errors.Wrap(err, "cannot ReadDirent") + } + if n <= 0 { + break // end of directory reached + } + // Loop over the bytes returned by reading the directory entries. + buf := scratchBuffer[:n] + for len(buf) > 0 { + de = (*syscall.Dirent)(unsafe.Pointer(&buf[0])) // point entry to first syscall.Dirent in buffer + buf = buf[de.Reclen:] // advance buffer + + if inoFromDirent(de) == 0 { + continue // this item has been deleted, but not yet removed from directory + } + + nameSlice := nameFromDirent(de) + namlen := len(nameSlice) + if (namlen == 0) || (namlen == 1 && nameSlice[0] == '.') || (namlen == 2 && nameSlice[0] == '.' && nameSlice[1] == '.') { + continue // skip unimportant entries + } + osChildname := string(nameSlice) + + // Convert syscall constant, which is in purview of OS, to a + // constant defined by Go, assumed by this project to be stable. + var mode os.FileMode + switch de.Type { + case syscall.DT_REG: + // regular file + case syscall.DT_DIR: + mode = os.ModeDir + case syscall.DT_LNK: + mode = os.ModeSymlink + case syscall.DT_CHR: + mode = os.ModeDevice | os.ModeCharDevice + case syscall.DT_BLK: + mode = os.ModeDevice + case syscall.DT_FIFO: + mode = os.ModeNamedPipe + case syscall.DT_SOCK: + mode = os.ModeSocket + default: + // If syscall returned unknown type (e.g., DT_UNKNOWN, DT_WHT), + // then resolve actual mode by getting stat. + fi, err := os.Lstat(filepath.Join(osDirname, osChildname)) + if err != nil { + _ = dh.Close() // ignore potential error returned by Close + return nil, errors.Wrap(err, "cannot Stat") + } + // We only care about the bits that identify the type of a file + // system node, and can ignore append, exclusive, temporary, + // setuid, setgid, permission bits, and sticky bits, which are + // coincident to the bits that declare type of the file system + // node. 
+ mode = fi.Mode() & os.ModeType + } + + entries = append(entries, &Dirent{name: osChildname, modeType: mode}) + } + } + if err = dh.Close(); err != nil { + return nil, err + } + return entries, nil +} + +func readdirnames(osDirname string, scratchBuffer []byte) ([]string, error) { + des, err := readdirents(osDirname, scratchBuffer) + if err != nil { + return nil, err + } + names := make([]string, len(des)) + for i, v := range des { + names[i] = v.name + } + return names, nil +} diff --git a/vendor/github.com/karrick/godirwalk/readdir_windows.go b/vendor/github.com/karrick/godirwalk/readdir_windows.go new file mode 100644 index 0000000..885a067 --- /dev/null +++ b/vendor/github.com/karrick/godirwalk/readdir_windows.go @@ -0,0 +1,54 @@ +package godirwalk + +import ( + "os" + + "github.com/pkg/errors" +) + +// The functions in this file are mere wrappers of what is already provided by +// standard library, in order to provide the same API as this library provides. +// +// The scratch buffer argument is ignored by this architecture. +// +// Please send PR or link to article if you know of a more performant way of +// enumerating directory contents and mode types on Windows. + +func readdirents(osDirname string, _ []byte) (Dirents, error) { + dh, err := os.Open(osDirname) + if err != nil { + return nil, errors.Wrap(err, "cannot Open") + } + + fileinfos, err := dh.Readdir(0) + if er := dh.Close(); err == nil { + err = er + } + if err != nil { + return nil, errors.Wrap(err, "cannot Readdir") + } + + entries := make(Dirents, len(fileinfos)) + for i, info := range fileinfos { + entries[i] = &Dirent{name: info.Name(), modeType: info.Mode() & os.ModeType} + } + + return entries, nil +} + +func readdirnames(osDirname string, _ []byte) ([]string, error) { + dh, err := os.Open(osDirname) + if err != nil { + return nil, errors.Wrap(err, "cannot Open") + } + + entries, err := dh.Readdirnames(0) + if er := dh.Close(); err == nil { + err = er + } + if err != nil { + return nil, errors.Wrap(err, "cannot Readdirnames") + } + + return entries, nil +} diff --git a/vendor/github.com/karrick/godirwalk/walk.go b/vendor/github.com/karrick/godirwalk/walk.go new file mode 100644 index 0000000..4c184ab --- /dev/null +++ b/vendor/github.com/karrick/godirwalk/walk.go @@ -0,0 +1,367 @@ +package godirwalk + +import ( + "os" + "path/filepath" + "sort" + + "github.com/pkg/errors" +) + +// DefaultScratchBufferSize specifies the size of the scratch buffer that will +// be allocated by Walk, ReadDirents, or ReadDirnames when a scratch buffer is +// not provided or the scratch buffer that is provided is smaller than +// MinimumScratchBufferSize bytes. This may seem like a large value; however, +// when a program intends to enumerate large directories, having a larger +// scratch buffer results in fewer operating system calls. +const DefaultScratchBufferSize = 64 * 1024 + +// MinimumScratchBufferSize specifies the minimum size of the scratch buffer +// that Walk, ReadDirents, and ReadDirnames will use when reading file entries +// from the operating system. It is initialized to the result from calling +// `os.Getpagesize()` during program startup. +var MinimumScratchBufferSize int + +func init() { + MinimumScratchBufferSize = os.Getpagesize() +} + +// Options provide parameters for how the Walk function operates. +type Options struct { + // ErrorCallback specifies a function to be invoked in the case of an error + // that could potentially be ignored while walking a file system + // hierarchy. 
When set to nil or left as its zero-value, any error condition + // causes Walk to immediately return the error describing what took + // place. When non-nil, this user supplied function is invoked with the OS + // pathname of the file system object that caused the error along with the + // error that took place. The return value of the supplied ErrorCallback + // function determines whether the error will cause Walk to halt immediately + // as it would were no ErrorCallback value provided, or skip this file + // system node yet continue on with the remaining nodes in the file system + // hierarchy. + // + // ErrorCallback is invoked both for errors that are returned by the + // runtime, and for errors returned by other user supplied callback + // functions. + ErrorCallback func(string, error) ErrorAction + + // FollowSymbolicLinks specifies whether Walk will follow symbolic links + // that refer to directories. When set to false or left as its zero-value, + // Walk will still invoke the callback function with symbolic link nodes, + // but if the symbolic link refers to a directory, it will not recurse on + // that directory. When set to true, Walk will recurse on symbolic links + // that refer to a directory. + FollowSymbolicLinks bool + + // Unsorted controls whether or not Walk will sort the immediate descendants + // of a directory by their relative names prior to visiting each of those + // entries. + // + // When set to false or left at its zero-value, Walk will get the list of + // immediate descendants of a particular directory, sort that list by + // lexical order of their names, and then visit each node in the list in + // sorted order. This will cause Walk to always traverse the same directory + // tree in the same order, however may be inefficient for directories with + // many immediate descendants. + // + // When set to true, Walk skips sorting the list of immediate descendants + // for a directory, and simply visits each node in the order the operating + // system enumerated them. This will be more fast, but with the side effect + // that the traversal order may be different from one invocation to the + // next. + Unsorted bool + + // Callback is a required function that Walk will invoke for every file + // system node it encounters. + Callback WalkFunc + + // PostChildrenCallback is an option function that Walk will invoke for + // every file system directory it encounters after its children have been + // processed. + PostChildrenCallback WalkFunc + + // ScratchBuffer is an optional byte slice to use as a scratch buffer for + // Walk to use when reading directory entries, to reduce amount of garbage + // generation. Not all architectures take advantage of the scratch + // buffer. If omitted or the provided buffer has fewer bytes than + // MinimumScratchBufferSize, then a buffer with DefaultScratchBufferSize + // bytes will be created and used once per Walk invocation. + ScratchBuffer []byte +} + +// ErrorAction defines a set of actions the Walk function could take based on +// the occurrence of an error while walking the file system. See the +// documentation for the ErrorCallback field of the Options structure for more +// information. +type ErrorAction int + +const ( + // Halt is the ErrorAction return value when the upstream code wants to halt + // the walk process when a runtime error takes place. It matches the default + // action the Walk function would take were no ErrorCallback provided. 
+ Halt ErrorAction = iota + + // SkipNode is the ErrorAction return value when the upstream code wants to + // ignore the runtime error for the current file system node, skip + // processing of the node that caused the error, and continue walking the + // file system hierarchy with the remaining nodes. + SkipNode +) + +// WalkFunc is the type of the function called for each file system node visited +// by Walk. The pathname argument will contain the argument to Walk as a prefix; +// that is, if Walk is called with "dir", which is a directory containing the +// file "a", the provided WalkFunc will be invoked with the argument "dir/a", +// using the correct os.PathSeparator for the Go Operating System architecture, +// GOOS. The directory entry argument is a pointer to a Dirent for the node, +// providing access to both the basename and the mode type of the file system +// node. +// +// If an error is returned by the Callback or PostChildrenCallback functions, +// and no ErrorCallback function is provided, processing stops. If an +// ErrorCallback function is provided, then it is invoked with the OS pathname +// of the node that caused the error along along with the error. The return +// value of the ErrorCallback function determines whether to halt processing, or +// skip this node and continue processing remaining file system nodes. +// +// The exception is when the function returns the special value +// filepath.SkipDir. If the function returns filepath.SkipDir when invoked on a +// directory, Walk skips the directory's contents entirely. If the function +// returns filepath.SkipDir when invoked on a non-directory file system node, +// Walk skips the remaining files in the containing directory. Note that any +// supplied ErrorCallback function is not invoked with filepath.SkipDir when the +// Callback or PostChildrenCallback functions return that special value. +type WalkFunc func(osPathname string, directoryEntry *Dirent) error + +// Walk walks the file tree rooted at the specified directory, calling the +// specified callback function for each file system node in the tree, including +// root, symbolic links, and other node types. The nodes are walked in lexical +// order, which makes the output deterministic but means that for very large +// directories this function can be inefficient. +// +// This function is often much faster than filepath.Walk because it does not +// invoke os.Stat for every node it encounters, but rather obtains the file +// system node type when it reads the parent directory. +// +// If a runtime error occurs, either from the operating system or from the +// upstream Callback or PostChildrenCallback functions, processing typically +// halts. However, when an ErrorCallback function is provided in the provided +// Options structure, that function is invoked with the error along with the OS +// pathname of the file system node that caused the error. The ErrorCallback +// function's return value determines the action that Walk will then take. +// +// func main() { +// dirname := "." +// if len(os.Args) > 1 { +// dirname = os.Args[1] +// } +// err := godirwalk.Walk(dirname, &godirwalk.Options{ +// Callback: func(osPathname string, de *godirwalk.Dirent) error { +// fmt.Printf("%s %s\n", de.ModeType(), osPathname) +// return nil +// }, +// ErrorCallback: func(osPathname string, err error) godirwalk.ErrorAction { +// // Your program may want to log the error somehow. 
+// fmt.Fprintf(os.Stderr, "ERROR: %s\n", err) +// +// // For the purposes of this example, a simple SkipNode will suffice, +// // although in reality perhaps additional logic might be called for. +// return godirwalk.SkipNode +// }, +// }) +// if err != nil { +// fmt.Fprintf(os.Stderr, "%s\n", err) +// os.Exit(1) +// } +// } +func Walk(pathname string, options *Options) error { + pathname = filepath.Clean(pathname) + + var fi os.FileInfo + var err error + + if options.FollowSymbolicLinks { + fi, err = os.Stat(pathname) + if err != nil { + return errors.Wrap(err, "cannot Stat") + } + } else { + fi, err = os.Lstat(pathname) + if err != nil { + return errors.Wrap(err, "cannot Lstat") + } + } + + mode := fi.Mode() + if mode&os.ModeDir == 0 { + return errors.Errorf("cannot Walk non-directory: %s", pathname) + } + + dirent := &Dirent{ + name: filepath.Base(pathname), + modeType: mode & os.ModeType, + } + + // If ErrorCallback is nil, set to a default value that halts the walk + // process on all operating system errors. This is done to allow error + // handling to be more succinct in the walk code. + if options.ErrorCallback == nil { + options.ErrorCallback = defaultErrorCallback + } + + if len(options.ScratchBuffer) < MinimumScratchBufferSize { + options.ScratchBuffer = make([]byte, DefaultScratchBufferSize) + } + + err = walk(pathname, dirent, options) + if err == filepath.SkipDir { + return nil // silence SkipDir for top level + } + return err +} + +// defaultErrorCallback always returns Halt because if the upstream code did not +// provide an ErrorCallback function, walking the file system hierarchy ought to +// halt upon any operating system error. +func defaultErrorCallback(_ string, _ error) ErrorAction { return Halt } + +// walk recursively traverses the file system node specified by pathname and the +// Dirent. +func walk(osPathname string, dirent *Dirent, options *Options) error { + err := options.Callback(osPathname, dirent) + if err != nil { + if err == filepath.SkipDir { + return err + } + err = errors.Wrap(err, "Callback") // wrap potential errors returned by callback + if action := options.ErrorCallback(osPathname, err); action == SkipNode { + return nil + } + return err + } + + // On some platforms, an entry can have more than one mode type bit set. + // For instance, it could have both the symlink bit and the directory bit + // set indicating it's a symlink to a directory. + if dirent.IsSymlink() { + if !options.FollowSymbolicLinks { + return nil + } + // Only need to Stat entry if platform did not already have os.ModeDir + // set, such as would be the case for unix like operating systems. (This + // guard eliminates extra os.Stat check on Windows.) + if !dirent.IsDir() { + referent, err := os.Readlink(osPathname) + if err != nil { + err = errors.Wrap(err, "cannot Readlink") + if action := options.ErrorCallback(osPathname, err); action == SkipNode { + return nil + } + return err + } + + var osp string + if filepath.IsAbs(referent) { + osp = referent + } else { + osp = filepath.Join(filepath.Dir(osPathname), referent) + } + + fi, err := os.Stat(osp) + if err != nil { + err = errors.Wrap(err, "cannot Stat") + if action := options.ErrorCallback(osp, err); action == SkipNode { + return nil + } + return err + } + dirent.modeType = fi.Mode() & os.ModeType + } + } + + if !dirent.IsDir() { + return nil + } + + // If get here, then specified pathname refers to a directory. 
+ deChildren, err := ReadDirents(osPathname, options.ScratchBuffer) + if err != nil { + err = errors.Wrap(err, "cannot ReadDirents") + if action := options.ErrorCallback(osPathname, err); action == SkipNode { + return nil + } + return err + } + + if !options.Unsorted { + sort.Sort(deChildren) // sort children entries unless upstream says to leave unsorted + } + + for _, deChild := range deChildren { + osChildname := filepath.Join(osPathname, deChild.name) + err = walk(osChildname, deChild, options) + if err != nil { + if err != filepath.SkipDir { + return err + } + // If received skipdir on a directory, stop processing that + // directory, but continue to its siblings. If received skipdir on a + // non-directory, stop processing remaining siblings. + if deChild.IsSymlink() { + // Only need to Stat entry if platform did not already have + // os.ModeDir set, such as would be the case for unix like + // operating systems. (This guard eliminates extra os.Stat check + // on Windows.) + if !deChild.IsDir() { + // Resolve symbolic link referent to determine whether node + // is directory or not. + referent, err := os.Readlink(osChildname) + if err != nil { + err = errors.Wrap(err, "cannot Readlink") + if action := options.ErrorCallback(osChildname, err); action == SkipNode { + continue // with next child + } + return err + } + + var osp string + if filepath.IsAbs(referent) { + osp = referent + } else { + osp = filepath.Join(osPathname, referent) + } + + fi, err := os.Stat(osp) + if err != nil { + err = errors.Wrap(err, "cannot Stat") + if action := options.ErrorCallback(osp, err); action == SkipNode { + continue // with next child + } + return err + } + deChild.modeType = fi.Mode() & os.ModeType + } + } + if !deChild.IsDir() { + // If not directory, return immediately, thus skipping remainder + // of siblings. 
+ return nil + } + } + } + + if options.PostChildrenCallback == nil { + return nil + } + + err = options.PostChildrenCallback(osPathname, dirent) + if err == nil || err == filepath.SkipDir { + return err + } + + err = errors.Wrap(err, "PostChildrenCallback") // wrap potential errors returned by callback + if action := options.ErrorCallback(osPathname, err); action == SkipNode { + return nil + } + return err +} diff --git a/vendor/github.com/karrick/godirwalk/withFileno.go b/vendor/github.com/karrick/godirwalk/withFileno.go new file mode 100644 index 0000000..1dc04a7 --- /dev/null +++ b/vendor/github.com/karrick/godirwalk/withFileno.go @@ -0,0 +1,9 @@ +// +build dragonfly freebsd openbsd netbsd + +package godirwalk + +import "syscall" + +func inoFromDirent(de *syscall.Dirent) uint64 { + return uint64(de.Fileno) +} diff --git a/vendor/github.com/karrick/godirwalk/withIno.go b/vendor/github.com/karrick/godirwalk/withIno.go new file mode 100644 index 0000000..47fc125 --- /dev/null +++ b/vendor/github.com/karrick/godirwalk/withIno.go @@ -0,0 +1,9 @@ +// +build darwin linux + +package godirwalk + +import "syscall" + +func inoFromDirent(de *syscall.Dirent) uint64 { + return de.Ino +} diff --git a/vendor/github.com/karrick/godirwalk/withNamlen.go b/vendor/github.com/karrick/godirwalk/withNamlen.go new file mode 100644 index 0000000..46a4af5 --- /dev/null +++ b/vendor/github.com/karrick/godirwalk/withNamlen.go @@ -0,0 +1,29 @@ +// +build darwin dragonfly freebsd netbsd openbsd + +package godirwalk + +import ( + "reflect" + "syscall" + "unsafe" +) + +func nameFromDirent(de *syscall.Dirent) []byte { + // Because this GOOS' syscall.Dirent provides a Namlen field that says how + // long the name is, this function does not need to search for the NULL + // byte. + ml := int(de.Namlen) + + // Convert syscall.Dirent.Name, which is array of int8, to []byte, by + // overwriting Cap, Len, and Data slice header fields to values from + // syscall.Dirent fields. Setting the Cap, Len, and Data field values for + // the slice header modifies what the slice header points to, and in this + // case, the name buffer. + var name []byte + sh := (*reflect.SliceHeader)(unsafe.Pointer(&name)) + sh.Cap = ml + sh.Len = ml + sh.Data = uintptr(unsafe.Pointer(&de.Name[0])) + + return name +} diff --git a/vendor/github.com/karrick/godirwalk/withoutNamlen.go b/vendor/github.com/karrick/godirwalk/withoutNamlen.go new file mode 100644 index 0000000..dcf9f3a --- /dev/null +++ b/vendor/github.com/karrick/godirwalk/withoutNamlen.go @@ -0,0 +1,36 @@ +// +build nacl linux solaris + +package godirwalk + +import ( + "bytes" + "reflect" + "syscall" + "unsafe" +) + +func nameFromDirent(de *syscall.Dirent) []byte { + // Because this GOOS' syscall.Dirent does not provide a field that specifies + // the name length, this function must first calculate the max possible name + // length, and then search for the NULL byte. + ml := int(uint64(de.Reclen) - uint64(unsafe.Offsetof(syscall.Dirent{}.Name))) + + // Convert syscall.Dirent.Name, which is array of int8, to []byte, by + // overwriting Cap, Len, and Data slice header fields to values from + // syscall.Dirent fields. Setting the Cap, Len, and Data field values for + // the slice header modifies what the slice header points to, and in this + // case, the name buffer. 
+ var name []byte + sh := (*reflect.SliceHeader)(unsafe.Pointer(&name)) + sh.Cap = ml + sh.Len = ml + sh.Data = uintptr(unsafe.Pointer(&de.Name[0])) + + if index := bytes.IndexByte(name, 0); index >= 0 { + // Found NULL byte; set slice's cap and len accordingly. + sh.Cap = index + sh.Len = index + } + + return name +} diff --git a/vendor/github.com/labstack/echo/.editorconfig b/vendor/github.com/labstack/echo/.editorconfig new file mode 100644 index 0000000..17ae50d --- /dev/null +++ b/vendor/github.com/labstack/echo/.editorconfig @@ -0,0 +1,25 @@ +# EditorConfig coding styles definitions. For more information about the +# properties used in this file, please see the EditorConfig documentation: +# http://editorconfig.org/ + +# indicate this is the root of the project +root = true + +[*] +charset = utf-8 + +end_of_line = LF +insert_final_newline = true +trim_trailing_whitespace = true + +indent_style = space +indent_size = 2 + +[Makefile] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +[*.go] +indent_style = tab diff --git a/vendor/github.com/labstack/echo/.gitattributes b/vendor/github.com/labstack/echo/.gitattributes new file mode 100644 index 0000000..49b63e5 --- /dev/null +++ b/vendor/github.com/labstack/echo/.gitattributes @@ -0,0 +1,20 @@ +# Automatically normalize line endings for all text-based files +# http://git-scm.com/docs/gitattributes#_end_of_line_conversion +* text=auto + +# For the following file types, normalize line endings to LF on checking and +# prevent conversion to CRLF when they are checked out (this is required in +# order to prevent newline related issues) +.* text eol=lf +*.go text eol=lf +*.yml text eol=lf +*.html text eol=lf +*.css text eol=lf +*.js text eol=lf +*.json text eol=lf +LICENSE text eol=lf + +# Exclude `website` and `cookbook` from GitHub's language statistics +# https://github.com/github/linguist#using-gitattributes +cookbook/* linguist-documentation +website/* linguist-documentation diff --git a/vendor/github.com/labstack/echo/.gitignore b/vendor/github.com/labstack/echo/.gitignore new file mode 100644 index 0000000..861c723 --- /dev/null +++ b/vendor/github.com/labstack/echo/.gitignore @@ -0,0 +1,6 @@ +.DS_Store +coverage.txt +_test +vendor +.idea +*.iml diff --git a/vendor/github.com/labstack/echo/.travis.yml b/vendor/github.com/labstack/echo/.travis.yml new file mode 100644 index 0000000..0b36b3c --- /dev/null +++ b/vendor/github.com/labstack/echo/.travis.yml @@ -0,0 +1,14 @@ +language: go +go: + - 1.7 + - 1.8 + - tip +install: + - make dependency +script: + - make test +after_success: + - bash <(curl -s https://codecov.io/bash) +matrix: + allow_failures: + - go: tip diff --git a/vendor/github.com/labstack/echo/Gopkg.lock b/vendor/github.com/labstack/echo/Gopkg.lock new file mode 100644 index 0000000..272aaa5 --- /dev/null +++ b/vendor/github.com/labstack/echo/Gopkg.lock @@ -0,0 +1,75 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + name = "github.com/davecgh/go-spew" + packages = ["spew"] + revision = "346938d642f2ec3594ed81d874461961cd0faa76" + version = "v1.1.0" + +[[projects]] + name = "github.com/dgrijalva/jwt-go" + packages = ["."] + revision = "d2709f9f1f31ebcda9651b03077758c1f3a0018c" + version = "v3.0.0" + +[[projects]] + name = "github.com/labstack/gommon" + packages = ["bytes","color","log","random"] + revision = "1121fd3e243c202482226a7afe4dcd07ffc4139a" + version = "v0.2.1" + +[[projects]] + name = "github.com/mattn/go-colorable" + packages = ["."] + revision = "d228849504861217f796da67fae4f6e347643f15" + version = "v0.0.7" + +[[projects]] + name = "github.com/mattn/go-isatty" + packages = ["."] + revision = "fc9e8d8ef48496124e79ae0df75490096eccf6fe" + version = "v0.0.2" + +[[projects]] + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + name = "github.com/stretchr/testify" + packages = ["assert"] + revision = "69483b4bd14f5845b5a1e55bca19e954e827f1d0" + version = "v1.1.4" + +[[projects]] + branch = "master" + name = "github.com/valyala/bytebufferpool" + packages = ["."] + revision = "e746df99fe4a3986f4d4f79e13c1e0117ce9c2f7" + +[[projects]] + branch = "master" + name = "github.com/valyala/fasttemplate" + packages = ["."] + revision = "dcecefd839c4193db0d35b88ec65b4c12d360ab0" + +[[projects]] + branch = "master" + name = "golang.org/x/crypto" + packages = ["acme","acme/autocert"] + revision = "e1a4589e7d3ea14a3352255d04b6f1a418845e5e" + +[[projects]] + branch = "master" + name = "golang.org/x/sys" + packages = ["unix"] + revision = "b90f89a1e7a9c1f6b918820b3daa7f08488c8594" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "5f74a2a2ba5b07475ad0faa1b4c021b973ad40b2ae749e3d94e15fe839bb440e" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/labstack/echo/Gopkg.toml b/vendor/github.com/labstack/echo/Gopkg.toml new file mode 100644 index 0000000..a24f61b --- /dev/null +++ b/vendor/github.com/labstack/echo/Gopkg.toml @@ -0,0 +1,87 @@ + +## Gopkg.toml example (these lines may be deleted) + +## "metadata" defines metadata about the project that could be used by other independent +## systems. The metadata defined here will be ignored by dep. +# [metadata] +# key1 = "value that convey data to other systems" +# system1-data = "value that is used by a system" +# system2-data = "value that is used by another system" + +## "required" lists a set of packages (not projects) that must be included in +## Gopkg.lock. This list is merged with the set of packages imported by the current +## project. Use it when your project needs a package it doesn't explicitly import - +## including "main" packages. +# required = ["github.com/user/thing/cmd/thing"] + +## "ignored" lists a set of packages (not projects) that are ignored when +## dep statically analyzes source code. Ignored packages can be in this project, +## or in a dependency. +# ignored = ["github.com/user/project/badpkg"] + +## Constraints are rules for how directly imported projects +## may be incorporated into the depgraph. They are respected by +## dep whether coming from the Gopkg.toml of the current project or a dependency. +# [[constraint]] +## Required: the root import path of the project being constrained. +# name = "github.com/user/project" +# +## Recommended: the version constraint to enforce for the project. +## Only one of "branch", "version" or "revision" can be specified. 
+# version = "1.0.0" +# branch = "master" +# revision = "abc123" +# +## Optional: an alternate location (URL or import path) for the project's source. +# source = "https://github.com/myfork/package.git" +# +## "metadata" defines metadata about the dependency or override that could be used +## by other independent systems. The metadata defined here will be ignored by dep. +# [metadata] +# key1 = "value that convey data to other systems" +# system1-data = "value that is used by a system" +# system2-data = "value that is used by another system" + +## Overrides have the same structure as [[constraint]], but supersede all +## [[constraint]] declarations from all projects. Only [[override]] from +## the current project's are applied. +## +## Overrides are a sledgehammer. Use them only as a last resort. +# [[override]] +## Required: the root import path of the project being constrained. +# name = "github.com/user/project" +# +## Optional: specifying a version constraint override will cause all other +## constraints on this project to be ignored; only the overridden constraint +## need be satisfied. +## Again, only one of "branch", "version" or "revision" can be specified. +# version = "1.0.0" +# branch = "master" +# revision = "abc123" +# +## Optional: specifying an alternate source location as an override will +## enforce that the alternate location is used for that project, regardless of +## what source location any dependent projects specify. +# source = "https://github.com/myfork/package.git" + + + +[[constraint]] + name = "github.com/dgrijalva/jwt-go" + version = "3.0.0" + +[[constraint]] + name = "github.com/labstack/gommon" + version = "0.2.1" + +[[constraint]] + name = "github.com/stretchr/testify" + version = "1.1.4" + +[[constraint]] + branch = "master" + name = "github.com/valyala/fasttemplate" + +[[constraint]] + branch = "master" + name = "golang.org/x/crypto" diff --git a/vendor/github.com/labstack/echo/LICENSE b/vendor/github.com/labstack/echo/LICENSE new file mode 100644 index 0000000..b5b006b --- /dev/null +++ b/vendor/github.com/labstack/echo/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2017 LabStack + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/labstack/echo/Makefile b/vendor/github.com/labstack/echo/Makefile new file mode 100644 index 0000000..87f7534 --- /dev/null +++ b/vendor/github.com/labstack/echo/Makefile @@ -0,0 +1,10 @@ +dependency: + go get -u github.com/golang/dep/cmd/dep + dep ensure -update + +test: + echo "" > coverage.txt + for d in $(shell go list ./... | grep -v vendor); do \ + go test -race -coverprofile=profile.out -covermode=atomic $$d; \ + [ -f profile.out ] && cat profile.out >> coverage.txt && rm profile.out; \ + done diff --git a/vendor/github.com/labstack/echo/README.md b/vendor/github.com/labstack/echo/README.md new file mode 100644 index 0000000..6c71756 --- /dev/null +++ b/vendor/github.com/labstack/echo/README.md @@ -0,0 +1,62 @@ + + +[![Sourcegraph](https://sourcegraph.com/github.com/labstack/echo/-/badge.svg?style=flat-square)](https://sourcegraph.com/github.com/labstack/echo?badge) +[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/labstack/echo) +[![Go Report Card](https://goreportcard.com/badge/github.com/labstack/echo?style=flat-square)](https://goreportcard.com/report/github.com/labstack/echo) +[![Build Status](http://img.shields.io/travis/labstack/echo.svg?style=flat-square)](https://travis-ci.org/labstack/echo) +[![Codecov](https://img.shields.io/codecov/c/github/labstack/echo.svg?style=flat-square)](https://codecov.io/gh/labstack/echo) [![Forum](https://img.shields.io/badge/community-forum-00afd1.svg?style=flat-square)](https://forum.labstack.com) +[![Twitter](https://img.shields.io/badge/twitter-@labstack-55acee.svg?style=flat-square)](https://twitter.com/labstack) +[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/labstack/echo/master/LICENSE) + +## Feature Overview + +- Optimized HTTP router which smartly prioritize routes +- Build robust and scalable RESTful APIs +- Group APIs +- Extensible middleware framework +- Define middleware at root, group or route level +- Data binding for JSON, XML and form payload +- Handy functions to send variety of HTTP responses +- Centralized HTTP error handling +- Template rendering with any template engine +- Define your format for the logger +- Highly customizable +- Automatic TLS via Let’s Encrypt +- HTTP/2 support + +## Performance + +![Performance](https://i.imgur.com/F2V7TfO.png) + +## [Get Started](https://echo.labstack.com/guide) + +## Support Us + +- :star: the project +- [Donate](https://echo.labstack.com/support-echo) +- :earth_americas: spread the word +- [Contribute](#contribute) to the project + +## Contribute + +**Use issues for everything** + +- For a small change, just send a PR. +- For bigger changes open an issue for discussion before sending a PR. 
+- PR should have: + - Test case + - Documentation + - Example (If it makes sense) +- You can also contribute by: + - Reporting issues + - Suggesting new features or enhancements + - Improve/fix documentation + +## Credits +- [Vishal Rana](https://github.com/vishr) - Author +- [Nitin Rana](https://github.com/nr17) - Consultant +- [Contributors](https://github.com/labstack/echo/graphs/contributors) + +## License + +[MIT](https://github.com/labstack/echo/blob/master/LICENSE) diff --git a/vendor/github.com/labstack/echo/bind.go b/vendor/github.com/labstack/echo/bind.go new file mode 100644 index 0000000..186bd83 --- /dev/null +++ b/vendor/github.com/labstack/echo/bind.go @@ -0,0 +1,261 @@ +package echo + +import ( + "encoding/json" + "encoding/xml" + "errors" + "fmt" + "net/http" + "reflect" + "strconv" + "strings" +) + +type ( + // Binder is the interface that wraps the Bind method. + Binder interface { + Bind(i interface{}, c Context) error + } + + // DefaultBinder is the default implementation of the Binder interface. + DefaultBinder struct{} + + // BindUnmarshaler is the interface used to wrap the UnmarshalParam method. + BindUnmarshaler interface { + // UnmarshalParam decodes and assigns a value from an form or query param. + UnmarshalParam(param string) error + } +) + +// Bind implements the `Binder#Bind` function. +func (b *DefaultBinder) Bind(i interface{}, c Context) (err error) { + req := c.Request() + if req.ContentLength == 0 { + if req.Method == GET || req.Method == DELETE { + if err = b.bindData(i, c.QueryParams(), "query"); err != nil { + return NewHTTPError(http.StatusBadRequest, err.Error()) + } + return + } + return NewHTTPError(http.StatusBadRequest, "Request body can't be empty") + } + ctype := req.Header.Get(HeaderContentType) + switch { + case strings.HasPrefix(ctype, MIMEApplicationJSON): + if err = json.NewDecoder(req.Body).Decode(i); err != nil { + if ute, ok := err.(*json.UnmarshalTypeError); ok { + return NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unmarshal type error: expected=%v, got=%v, offset=%v", ute.Type, ute.Value, ute.Offset)) + } else if se, ok := err.(*json.SyntaxError); ok { + return NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Syntax error: offset=%v, error=%v", se.Offset, se.Error())) + } else { + return NewHTTPError(http.StatusBadRequest, err.Error()) + } + } + case strings.HasPrefix(ctype, MIMEApplicationXML), strings.HasPrefix(ctype, MIMETextXML): + if err = xml.NewDecoder(req.Body).Decode(i); err != nil { + if ute, ok := err.(*xml.UnsupportedTypeError); ok { + return NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Unsupported type error: type=%v, error=%v", ute.Type, ute.Error())) + } else if se, ok := err.(*xml.SyntaxError); ok { + return NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Syntax error: line=%v, error=%v", se.Line, se.Error())) + } else { + return NewHTTPError(http.StatusBadRequest, err.Error()) + } + } + case strings.HasPrefix(ctype, MIMEApplicationForm), strings.HasPrefix(ctype, MIMEMultipartForm): + params, err := c.FormParams() + if err != nil { + return NewHTTPError(http.StatusBadRequest, err.Error()) + } + if err = b.bindData(i, params, "form"); err != nil { + return NewHTTPError(http.StatusBadRequest, err.Error()) + } + default: + return ErrUnsupportedMediaType + } + return +} + +func (b *DefaultBinder) bindData(ptr interface{}, data map[string][]string, tag string) error { + typ := reflect.TypeOf(ptr).Elem() + val := reflect.ValueOf(ptr).Elem() + + if typ.Kind() != reflect.Struct { + return errors.New("Binding 
element must be a struct") + } + + for i := 0; i < typ.NumField(); i++ { + typeField := typ.Field(i) + structField := val.Field(i) + if !structField.CanSet() { + continue + } + structFieldKind := structField.Kind() + inputFieldName := typeField.Tag.Get(tag) + + if inputFieldName == "" { + inputFieldName = typeField.Name + // If tag is nil, we inspect if the field is a struct. + if _, ok := bindUnmarshaler(structField); !ok && structFieldKind == reflect.Struct { + err := b.bindData(structField.Addr().Interface(), data, tag) + if err != nil { + return err + } + continue + } + } + inputValue, exists := data[inputFieldName] + if !exists { + continue + } + + // Call this first, in case we're dealing with an alias to an array type + if ok, err := unmarshalField(typeField.Type.Kind(), inputValue[0], structField); ok { + if err != nil { + return err + } + continue + } + + numElems := len(inputValue) + if structFieldKind == reflect.Slice && numElems > 0 { + sliceOf := structField.Type().Elem().Kind() + slice := reflect.MakeSlice(structField.Type(), numElems, numElems) + for j := 0; j < numElems; j++ { + if err := setWithProperType(sliceOf, inputValue[j], slice.Index(j)); err != nil { + return err + } + } + val.Field(i).Set(slice) + } else { + if err := setWithProperType(typeField.Type.Kind(), inputValue[0], structField); err != nil { + return err + } + } + } + return nil +} + +func setWithProperType(valueKind reflect.Kind, val string, structField reflect.Value) error { + // But also call it here, in case we're dealing with an array of BindUnmarshalers + if ok, err := unmarshalField(valueKind, val, structField); ok { + return err + } + + switch valueKind { + case reflect.Ptr: + return setWithProperType(structField.Elem().Kind(), val, structField.Elem()) + case reflect.Int: + return setIntField(val, 0, structField) + case reflect.Int8: + return setIntField(val, 8, structField) + case reflect.Int16: + return setIntField(val, 16, structField) + case reflect.Int32: + return setIntField(val, 32, structField) + case reflect.Int64: + return setIntField(val, 64, structField) + case reflect.Uint: + return setUintField(val, 0, structField) + case reflect.Uint8: + return setUintField(val, 8, structField) + case reflect.Uint16: + return setUintField(val, 16, structField) + case reflect.Uint32: + return setUintField(val, 32, structField) + case reflect.Uint64: + return setUintField(val, 64, structField) + case reflect.Bool: + return setBoolField(val, structField) + case reflect.Float32: + return setFloatField(val, 32, structField) + case reflect.Float64: + return setFloatField(val, 64, structField) + case reflect.String: + structField.SetString(val) + default: + return errors.New("unknown type") + } + return nil +} + +func unmarshalField(valueKind reflect.Kind, val string, field reflect.Value) (bool, error) { + switch valueKind { + case reflect.Ptr: + return unmarshalFieldPtr(val, field) + default: + return unmarshalFieldNonPtr(val, field) + } +} + +// bindUnmarshaler attempts to unmarshal a reflect.Value into a BindUnmarshaler +func bindUnmarshaler(field reflect.Value) (BindUnmarshaler, bool) { + ptr := reflect.New(field.Type()) + if ptr.CanInterface() { + iface := ptr.Interface() + if unmarshaler, ok := iface.(BindUnmarshaler); ok { + return unmarshaler, ok + } + } + return nil, false +} + +func unmarshalFieldNonPtr(value string, field reflect.Value) (bool, error) { + if unmarshaler, ok := bindUnmarshaler(field); ok { + err := unmarshaler.UnmarshalParam(value) + field.Set(reflect.ValueOf(unmarshaler).Elem()) + 
return true, err + } + return false, nil +} + +func unmarshalFieldPtr(value string, field reflect.Value) (bool, error) { + if field.IsNil() { + // Initialize the pointer to a nil value + field.Set(reflect.New(field.Type().Elem())) + } + return unmarshalFieldNonPtr(value, field.Elem()) +} + +func setIntField(value string, bitSize int, field reflect.Value) error { + if value == "" { + value = "0" + } + intVal, err := strconv.ParseInt(value, 10, bitSize) + if err == nil { + field.SetInt(intVal) + } + return err +} + +func setUintField(value string, bitSize int, field reflect.Value) error { + if value == "" { + value = "0" + } + uintVal, err := strconv.ParseUint(value, 10, bitSize) + if err == nil { + field.SetUint(uintVal) + } + return err +} + +func setBoolField(value string, field reflect.Value) error { + if value == "" { + value = "false" + } + boolVal, err := strconv.ParseBool(value) + if err == nil { + field.SetBool(boolVal) + } + return err +} + +func setFloatField(value string, bitSize int, field reflect.Value) error { + if value == "" { + value = "0.0" + } + floatVal, err := strconv.ParseFloat(value, bitSize) + if err == nil { + field.SetFloat(floatVal) + } + return err +} diff --git a/vendor/github.com/labstack/echo/context.go b/vendor/github.com/labstack/echo/context.go new file mode 100644 index 0000000..a1ee3e0 --- /dev/null +++ b/vendor/github.com/labstack/echo/context.go @@ -0,0 +1,577 @@ +package echo + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "mime/multipart" + "net" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" +) + +type ( + // Context represents the context of the current HTTP request. It holds request and + // response objects, path, path parameters, data and registered handler. + Context interface { + // Request returns `*http.Request`. + Request() *http.Request + + // SetRequest sets `*http.Request`. + SetRequest(r *http.Request) + + // Response returns `*Response`. + Response() *Response + + // IsTLS returns true if HTTP connection is TLS otherwise false. + IsTLS() bool + + // IsWebSocket returns true if HTTP connection is WebSocket otherwise false. + IsWebSocket() bool + + // Scheme returns the HTTP protocol scheme, `http` or `https`. + Scheme() string + + // RealIP returns the client's network address based on `X-Forwarded-For` + // or `X-Real-IP` request header. + RealIP() string + + // Path returns the registered path for the handler. + Path() string + + // SetPath sets the registered path for the handler. + SetPath(p string) + + // Param returns path parameter by name. + Param(name string) string + + // ParamNames returns path parameter names. + ParamNames() []string + + // SetParamNames sets path parameter names. + SetParamNames(names ...string) + + // ParamValues returns path parameter values. + ParamValues() []string + + // SetParamValues sets path parameter values. + SetParamValues(values ...string) + + // QueryParam returns the query param for the provided name. + QueryParam(name string) string + + // QueryParams returns the query parameters as `url.Values`. + QueryParams() url.Values + + // QueryString returns the URL query string. + QueryString() string + + // FormValue returns the form field value for the provided name. + FormValue(name string) string + + // FormParams returns the form parameters as `url.Values`. + FormParams() (url.Values, error) + + // FormFile returns the multipart form file for the provided name. 
+ FormFile(name string) (*multipart.FileHeader, error) + + // MultipartForm returns the multipart form. + MultipartForm() (*multipart.Form, error) + + // Cookie returns the named cookie provided in the request. + Cookie(name string) (*http.Cookie, error) + + // SetCookie adds a `Set-Cookie` header in HTTP response. + SetCookie(cookie *http.Cookie) + + // Cookies returns the HTTP cookies sent with the request. + Cookies() []*http.Cookie + + // Get retrieves data from the context. + Get(key string) interface{} + + // Set saves data in the context. + Set(key string, val interface{}) + + // Bind binds the request body into provided type `i`. The default binder + // does it based on Content-Type header. + Bind(i interface{}) error + + // Validate validates provided `i`. It is usually called after `Context#Bind()`. + // Validator must be registered using `Echo#Validator`. + Validate(i interface{}) error + + // Render renders a template with data and sends a text/html response with status + // code. Renderer must be registered using `Echo.Renderer`. + Render(code int, name string, data interface{}) error + + // HTML sends an HTTP response with status code. + HTML(code int, html string) error + + // HTMLBlob sends an HTTP blob response with status code. + HTMLBlob(code int, b []byte) error + + // String sends a string response with status code. + String(code int, s string) error + + // JSON sends a JSON response with status code. + JSON(code int, i interface{}) error + + // JSONPretty sends a pretty-print JSON with status code. + JSONPretty(code int, i interface{}, indent string) error + + // JSONBlob sends a JSON blob response with status code. + JSONBlob(code int, b []byte) error + + // JSONP sends a JSONP response with status code. It uses `callback` to construct + // the JSONP payload. + JSONP(code int, callback string, i interface{}) error + + // JSONPBlob sends a JSONP blob response with status code. It uses `callback` + // to construct the JSONP payload. + JSONPBlob(code int, callback string, b []byte) error + + // XML sends an XML response with status code. + XML(code int, i interface{}) error + + // XMLPretty sends a pretty-print XML with status code. + XMLPretty(code int, i interface{}, indent string) error + + // XMLBlob sends an XML blob response with status code. + XMLBlob(code int, b []byte) error + + // Blob sends a blob response with status code and content type. + Blob(code int, contentType string, b []byte) error + + // Stream sends a streaming response with status code and content type. + Stream(code int, contentType string, r io.Reader) error + + // File sends a response with the content of the file. + File(file string) error + + // Attachment sends a response as attachment, prompting client to save the + // file. + Attachment(file string, name string) error + + // Inline sends a response as inline, opening the file in the browser. + Inline(file string, name string) error + + // NoContent sends a response with no body and a status code. + NoContent(code int) error + + // Redirect redirects the request to a provided URL with status code. + Redirect(code int, url string) error + + // Error invokes the registered HTTP error handler. Generally used by middleware. + Error(err error) + + // Handler returns the matched handler by router. + Handler() HandlerFunc + + // SetHandler sets the matched handler by router. + SetHandler(h HandlerFunc) + + // Logger returns the `Logger` instance. + Logger() Logger + + // Echo returns the `Echo` instance. 
+ Echo() *Echo + + // Reset resets the context after request completes. It must be called along + // with `Echo#AcquireContext()` and `Echo#ReleaseContext()`. + // See `Echo#ServeHTTP()` + Reset(r *http.Request, w http.ResponseWriter) + } + + context struct { + request *http.Request + response *Response + path string + pnames []string + pvalues []string + query url.Values + handler HandlerFunc + store Map + echo *Echo + } +) + +const ( + defaultMemory = 32 << 20 // 32 MB + indexPage = "index.html" +) + +func (c *context) Request() *http.Request { + return c.request +} + +func (c *context) SetRequest(r *http.Request) { + c.request = r +} + +func (c *context) Response() *Response { + return c.response +} + +func (c *context) IsTLS() bool { + return c.request.TLS != nil +} + +func (c *context) IsWebSocket() bool { + upgrade := c.request.Header.Get(HeaderUpgrade) + return upgrade == "websocket" || upgrade == "Websocket" +} + +func (c *context) Scheme() string { + // Can't use `r.Request.URL.Scheme` + // See: https://groups.google.com/forum/#!topic/golang-nuts/pMUkBlQBDF0 + if c.IsTLS() { + return "https" + } + if scheme := c.request.Header.Get(HeaderXForwardedProto); scheme != "" { + return scheme + } + if scheme := c.request.Header.Get(HeaderXForwardedProtocol); scheme != "" { + return scheme + } + if ssl := c.request.Header.Get(HeaderXForwardedSsl); ssl == "on" { + return "https" + } + if scheme := c.request.Header.Get(HeaderXUrlScheme); scheme != "" { + return scheme + } + return "http" +} + +func (c *context) RealIP() string { + ra := c.request.RemoteAddr + if ip := c.request.Header.Get(HeaderXForwardedFor); ip != "" { + ra = strings.Split(ip, ", ")[0] + } else if ip := c.request.Header.Get(HeaderXRealIP); ip != "" { + ra = ip + } else { + ra, _, _ = net.SplitHostPort(ra) + } + return ra +} + +func (c *context) Path() string { + return c.path +} + +func (c *context) SetPath(p string) { + c.path = p +} + +func (c *context) Param(name string) string { + for i, n := range c.pnames { + if i < len(c.pvalues) { + if n == name { + return c.pvalues[i] + } + + // Param name with aliases + for _, p := range strings.Split(n, ",") { + if p == name { + return c.pvalues[i] + } + } + } + } + return "" +} + +func (c *context) ParamNames() []string { + return c.pnames +} + +func (c *context) SetParamNames(names ...string) { + c.pnames = names +} + +func (c *context) ParamValues() []string { + return c.pvalues[:len(c.pnames)] +} + +func (c *context) SetParamValues(values ...string) { + c.pvalues = values +} + +func (c *context) QueryParam(name string) string { + if c.query == nil { + c.query = c.request.URL.Query() + } + return c.query.Get(name) +} + +func (c *context) QueryParams() url.Values { + if c.query == nil { + c.query = c.request.URL.Query() + } + return c.query +} + +func (c *context) QueryString() string { + return c.request.URL.RawQuery +} + +func (c *context) FormValue(name string) string { + return c.request.FormValue(name) +} + +func (c *context) FormParams() (url.Values, error) { + if strings.HasPrefix(c.request.Header.Get(HeaderContentType), MIMEMultipartForm) { + if err := c.request.ParseMultipartForm(defaultMemory); err != nil { + return nil, err + } + } else { + if err := c.request.ParseForm(); err != nil { + return nil, err + } + } + return c.request.Form, nil +} + +func (c *context) FormFile(name string) (*multipart.FileHeader, error) { + _, fh, err := c.request.FormFile(name) + return fh, err +} + +func (c *context) MultipartForm() (*multipart.Form, error) { + err := 
c.request.ParseMultipartForm(defaultMemory) + return c.request.MultipartForm, err +} + +func (c *context) Cookie(name string) (*http.Cookie, error) { + return c.request.Cookie(name) +} + +func (c *context) SetCookie(cookie *http.Cookie) { + http.SetCookie(c.Response(), cookie) +} + +func (c *context) Cookies() []*http.Cookie { + return c.request.Cookies() +} + +func (c *context) Get(key string) interface{} { + return c.store[key] +} + +func (c *context) Set(key string, val interface{}) { + if c.store == nil { + c.store = make(Map) + } + c.store[key] = val +} + +func (c *context) Bind(i interface{}) error { + return c.echo.Binder.Bind(i, c) +} + +func (c *context) Validate(i interface{}) error { + if c.echo.Validator == nil { + return ErrValidatorNotRegistered + } + return c.echo.Validator.Validate(i) +} + +func (c *context) Render(code int, name string, data interface{}) (err error) { + if c.echo.Renderer == nil { + return ErrRendererNotRegistered + } + buf := new(bytes.Buffer) + if err = c.echo.Renderer.Render(buf, name, data, c); err != nil { + return + } + return c.HTMLBlob(code, buf.Bytes()) +} + +func (c *context) HTML(code int, html string) (err error) { + return c.HTMLBlob(code, []byte(html)) +} + +func (c *context) HTMLBlob(code int, b []byte) (err error) { + return c.Blob(code, MIMETextHTMLCharsetUTF8, b) +} + +func (c *context) String(code int, s string) (err error) { + return c.Blob(code, MIMETextPlainCharsetUTF8, []byte(s)) +} + +func (c *context) JSON(code int, i interface{}) (err error) { + _, pretty := c.QueryParams()["pretty"] + if c.echo.Debug || pretty { + return c.JSONPretty(code, i, " ") + } + b, err := json.Marshal(i) + if err != nil { + return + } + return c.JSONBlob(code, b) +} + +func (c *context) JSONPretty(code int, i interface{}, indent string) (err error) { + b, err := json.MarshalIndent(i, "", indent) + if err != nil { + return + } + return c.JSONBlob(code, b) +} + +func (c *context) JSONBlob(code int, b []byte) (err error) { + return c.Blob(code, MIMEApplicationJSONCharsetUTF8, b) +} + +func (c *context) JSONP(code int, callback string, i interface{}) (err error) { + b, err := json.Marshal(i) + if err != nil { + return + } + return c.JSONPBlob(code, callback, b) +} + +func (c *context) JSONPBlob(code int, callback string, b []byte) (err error) { + c.response.Header().Set(HeaderContentType, MIMEApplicationJavaScriptCharsetUTF8) + c.response.WriteHeader(code) + if _, err = c.response.Write([]byte(callback + "(")); err != nil { + return + } + if _, err = c.response.Write(b); err != nil { + return + } + _, err = c.response.Write([]byte(");")) + return +} + +func (c *context) XML(code int, i interface{}) (err error) { + _, pretty := c.QueryParams()["pretty"] + if c.echo.Debug || pretty { + return c.XMLPretty(code, i, " ") + } + b, err := xml.Marshal(i) + if err != nil { + return + } + return c.XMLBlob(code, b) +} + +func (c *context) XMLPretty(code int, i interface{}, indent string) (err error) { + b, err := xml.MarshalIndent(i, "", indent) + if err != nil { + return + } + return c.XMLBlob(code, b) +} + +func (c *context) XMLBlob(code int, b []byte) (err error) { + c.response.Header().Set(HeaderContentType, MIMEApplicationXMLCharsetUTF8) + c.response.WriteHeader(code) + if _, err = c.response.Write([]byte(xml.Header)); err != nil { + return + } + _, err = c.response.Write(b) + return +} + +func (c *context) Blob(code int, contentType string, b []byte) (err error) { + c.response.Header().Set(HeaderContentType, contentType) + c.response.WriteHeader(code) + _, err = 
c.response.Write(b) + return +} + +func (c *context) Stream(code int, contentType string, r io.Reader) (err error) { + c.response.Header().Set(HeaderContentType, contentType) + c.response.WriteHeader(code) + _, err = io.Copy(c.response, r) + return +} + +func (c *context) File(file string) (err error) { + f, err := os.Open(file) + if err != nil { + return ErrNotFound + } + defer f.Close() + + fi, _ := f.Stat() + if fi.IsDir() { + file = filepath.Join(file, indexPage) + f, err = os.Open(file) + if err != nil { + return ErrNotFound + } + defer f.Close() + if fi, err = f.Stat(); err != nil { + return + } + } + http.ServeContent(c.Response(), c.Request(), fi.Name(), fi.ModTime(), f) + return +} + +func (c *context) Attachment(file, name string) (err error) { + return c.contentDisposition(file, name, "attachment") +} + +func (c *context) Inline(file, name string) (err error) { + return c.contentDisposition(file, name, "inline") +} + +func (c *context) contentDisposition(file, name, dispositionType string) (err error) { + c.response.Header().Set(HeaderContentDisposition, fmt.Sprintf("%s; filename=%s", dispositionType, name)) + c.File(file) + return +} + +func (c *context) NoContent(code int) error { + c.response.WriteHeader(code) + return nil +} + +func (c *context) Redirect(code int, url string) error { + if code < 300 || code > 308 { + return ErrInvalidRedirectCode + } + c.response.Header().Set(HeaderLocation, url) + c.response.WriteHeader(code) + return nil +} + +func (c *context) Error(err error) { + c.echo.HTTPErrorHandler(err, c) +} + +func (c *context) Echo() *Echo { + return c.echo +} + +func (c *context) Handler() HandlerFunc { + return c.handler +} + +func (c *context) SetHandler(h HandlerFunc) { + c.handler = h +} + +func (c *context) Logger() Logger { + return c.echo.Logger +} + +func (c *context) Reset(r *http.Request, w http.ResponseWriter) { + c.request = r + c.response.reset(w) + c.query = nil + c.handler = NotFoundHandler + c.store = nil + c.path = "" + c.pnames = nil + // NOTE: Don't reset because it has to have length c.echo.maxParam at all times + // c.pvalues = nil +} diff --git a/vendor/github.com/labstack/echo/echo.go b/vendor/github.com/labstack/echo/echo.go new file mode 100644 index 0000000..ab17b20 --- /dev/null +++ b/vendor/github.com/labstack/echo/echo.go @@ -0,0 +1,716 @@ +/* +Package echo implements high performance, minimalist Go web framework. + +Example: + + package main + + import ( + "net/http" + + "github.com/labstack/echo" + "github.com/labstack/echo/middleware" + ) + + // Handler + func hello(c echo.Context) error { + return c.String(http.StatusOK, "Hello, World!") + } + + func main() { + // Echo instance + e := echo.New() + + // Middleware + e.Use(middleware.Logger()) + e.Use(middleware.Recover()) + + // Routes + e.GET("/", hello) + + // Start server + e.Logger.Fatal(e.Start(":1323")) + } + +Learn more at https://echo.labstack.com +*/ +package echo + +import ( + "bytes" + "crypto/tls" + "errors" + "fmt" + "io" + stdLog "log" + "net" + "net/http" + "path" + "path/filepath" + "reflect" + "runtime" + "sync" + "time" + + "github.com/labstack/gommon/color" + "github.com/labstack/gommon/log" + "golang.org/x/crypto/acme/autocert" +) + +type ( + // Echo is the top-level framework instance. 
+ Echo struct { + stdLogger *stdLog.Logger + colorer *color.Color + premiddleware []MiddlewareFunc + middleware []MiddlewareFunc + maxParam *int + router *Router + notFoundHandler HandlerFunc + pool sync.Pool + Server *http.Server + TLSServer *http.Server + Listener net.Listener + TLSListener net.Listener + DisableHTTP2 bool + Debug bool + HideBanner bool + HTTPErrorHandler HTTPErrorHandler + Binder Binder + Validator Validator + Renderer Renderer + AutoTLSManager autocert.Manager + // Mutex sync.RWMutex + Logger Logger + } + + // Route contains a handler and information for matching against requests. + Route struct { + Method string `json:"method"` + Path string `json:"path"` + Handler string `json:"handler"` + } + + // HTTPError represents an error that occurred while handling a request. + HTTPError struct { + Code int + Message interface{} + } + + // MiddlewareFunc defines a function to process middleware. + MiddlewareFunc func(HandlerFunc) HandlerFunc + + // HandlerFunc defines a function to server HTTP requests. + HandlerFunc func(Context) error + + // HTTPErrorHandler is a centralized HTTP error handler. + HTTPErrorHandler func(error, Context) + + // Validator is the interface that wraps the Validate function. + Validator interface { + Validate(i interface{}) error + } + + // Renderer is the interface that wraps the Render function. + Renderer interface { + Render(io.Writer, string, interface{}, Context) error + } + + // Map defines a generic map of type `map[string]interface{}`. + Map map[string]interface{} + + // i is the interface for Echo and Group. + i interface { + GET(string, HandlerFunc, ...MiddlewareFunc) + } +) + +// HTTP methods +const ( + CONNECT = "CONNECT" + DELETE = "DELETE" + GET = "GET" + HEAD = "HEAD" + OPTIONS = "OPTIONS" + PATCH = "PATCH" + POST = "POST" + PUT = "PUT" + TRACE = "TRACE" +) + +// MIME types +const ( + MIMEApplicationJSON = "application/json" + MIMEApplicationJSONCharsetUTF8 = MIMEApplicationJSON + "; " + charsetUTF8 + MIMEApplicationJavaScript = "application/javascript" + MIMEApplicationJavaScriptCharsetUTF8 = MIMEApplicationJavaScript + "; " + charsetUTF8 + MIMEApplicationXML = "application/xml" + MIMEApplicationXMLCharsetUTF8 = MIMEApplicationXML + "; " + charsetUTF8 + MIMETextXML = "text/xml" + MIMETextXMLCharsetUTF8 = MIMETextXML + "; " + charsetUTF8 + MIMEApplicationForm = "application/x-www-form-urlencoded" + MIMEApplicationProtobuf = "application/protobuf" + MIMEApplicationMsgpack = "application/msgpack" + MIMETextHTML = "text/html" + MIMETextHTMLCharsetUTF8 = MIMETextHTML + "; " + charsetUTF8 + MIMETextPlain = "text/plain" + MIMETextPlainCharsetUTF8 = MIMETextPlain + "; " + charsetUTF8 + MIMEMultipartForm = "multipart/form-data" + MIMEOctetStream = "application/octet-stream" +) + +const ( + charsetUTF8 = "charset=UTF-8" +) + +// Headers +const ( + HeaderAccept = "Accept" + HeaderAcceptEncoding = "Accept-Encoding" + HeaderAllow = "Allow" + HeaderAuthorization = "Authorization" + HeaderContentDisposition = "Content-Disposition" + HeaderContentEncoding = "Content-Encoding" + HeaderContentLength = "Content-Length" + HeaderContentType = "Content-Type" + HeaderCookie = "Cookie" + HeaderSetCookie = "Set-Cookie" + HeaderIfModifiedSince = "If-Modified-Since" + HeaderLastModified = "Last-Modified" + HeaderLocation = "Location" + HeaderUpgrade = "Upgrade" + HeaderVary = "Vary" + HeaderWWWAuthenticate = "WWW-Authenticate" + HeaderXForwardedFor = "X-Forwarded-For" + HeaderXForwardedProto = "X-Forwarded-Proto" + HeaderXForwardedProtocol = 
"X-Forwarded-Protocol" + HeaderXForwardedSsl = "X-Forwarded-Ssl" + HeaderXUrlScheme = "X-Url-Scheme" + HeaderXHTTPMethodOverride = "X-HTTP-Method-Override" + HeaderXRealIP = "X-Real-IP" + HeaderXRequestID = "X-Request-ID" + HeaderServer = "Server" + HeaderOrigin = "Origin" + + // Access control + HeaderAccessControlRequestMethod = "Access-Control-Request-Method" + HeaderAccessControlRequestHeaders = "Access-Control-Request-Headers" + HeaderAccessControlAllowOrigin = "Access-Control-Allow-Origin" + HeaderAccessControlAllowMethods = "Access-Control-Allow-Methods" + HeaderAccessControlAllowHeaders = "Access-Control-Allow-Headers" + HeaderAccessControlAllowCredentials = "Access-Control-Allow-Credentials" + HeaderAccessControlExposeHeaders = "Access-Control-Expose-Headers" + HeaderAccessControlMaxAge = "Access-Control-Max-Age" + + // Security + HeaderStrictTransportSecurity = "Strict-Transport-Security" + HeaderXContentTypeOptions = "X-Content-Type-Options" + HeaderXXSSProtection = "X-XSS-Protection" + HeaderXFrameOptions = "X-Frame-Options" + HeaderContentSecurityPolicy = "Content-Security-Policy" + HeaderXCSRFToken = "X-CSRF-Token" +) + +const ( + version = "3.2.1" + website = "https://echo.labstack.com" + // http://patorjk.com/software/taag/#p=display&f=Small%20Slant&t=Echo + banner = ` + ____ __ + / __/___/ / ___ + / _// __/ _ \/ _ \ +/___/\__/_//_/\___/ %s +High performance, minimalist Go web framework +%s +____________________________________O/_______ + O\ +` +) + +var ( + methods = [...]string{ + CONNECT, + DELETE, + GET, + HEAD, + OPTIONS, + PATCH, + POST, + PUT, + TRACE, + } +) + +// Errors +var ( + ErrUnsupportedMediaType = NewHTTPError(http.StatusUnsupportedMediaType) + ErrNotFound = NewHTTPError(http.StatusNotFound) + ErrUnauthorized = NewHTTPError(http.StatusUnauthorized) + ErrForbidden = NewHTTPError(http.StatusForbidden) + ErrMethodNotAllowed = NewHTTPError(http.StatusMethodNotAllowed) + ErrStatusRequestEntityTooLarge = NewHTTPError(http.StatusRequestEntityTooLarge) + ErrValidatorNotRegistered = errors.New("Validator not registered") + ErrRendererNotRegistered = errors.New("Renderer not registered") + ErrInvalidRedirectCode = errors.New("Invalid redirect status code") + ErrCookieNotFound = errors.New("Cookie not found") +) + +// Error handlers +var ( + NotFoundHandler = func(c Context) error { + return ErrNotFound + } + + MethodNotAllowedHandler = func(c Context) error { + return ErrMethodNotAllowed + } +) + +// New creates an instance of Echo. +func New() (e *Echo) { + e = &Echo{ + Server: new(http.Server), + TLSServer: new(http.Server), + AutoTLSManager: autocert.Manager{ + Prompt: autocert.AcceptTOS, + }, + Logger: log.New("echo"), + colorer: color.New(), + maxParam: new(int), + } + e.Server.Handler = e + e.TLSServer.Handler = e + e.HTTPErrorHandler = e.DefaultHTTPErrorHandler + e.Binder = &DefaultBinder{} + e.Logger.SetLevel(log.OFF) + e.stdLogger = stdLog.New(e.Logger.Output(), e.Logger.Prefix()+": ", 0) + e.pool.New = func() interface{} { + return e.NewContext(nil, nil) + } + e.router = NewRouter(e) + return +} + +// NewContext returns a Context instance. +func (e *Echo) NewContext(r *http.Request, w http.ResponseWriter) Context { + return &context{ + request: r, + response: NewResponse(w, e), + store: make(Map), + echo: e, + pvalues: make([]string, *e.maxParam), + handler: NotFoundHandler, + } +} + +// Router returns router. +func (e *Echo) Router() *Router { + return e.router +} + +// DefaultHTTPErrorHandler is the default HTTP error handler. 
It sends a JSON response +// with status code. +func (e *Echo) DefaultHTTPErrorHandler(err error, c Context) { + var ( + code = http.StatusInternalServerError + msg interface{} + ) + + if he, ok := err.(*HTTPError); ok { + code = he.Code + msg = he.Message + } else if e.Debug { + msg = err.Error() + } else { + msg = http.StatusText(code) + } + if _, ok := msg.(string); ok { + msg = Map{"message": msg} + } + + if !c.Response().Committed { + if c.Request().Method == HEAD { // Issue #608 + if err := c.NoContent(code); err != nil { + goto ERROR + } + } else { + if err := c.JSON(code, msg); err != nil { + goto ERROR + } + } + } +ERROR: + e.Logger.Error(err) +} + +// Pre adds middleware to the chain which is run before router. +func (e *Echo) Pre(middleware ...MiddlewareFunc) { + e.premiddleware = append(e.premiddleware, middleware...) +} + +// Use adds middleware to the chain which is run after router. +func (e *Echo) Use(middleware ...MiddlewareFunc) { + e.middleware = append(e.middleware, middleware...) +} + +// CONNECT registers a new CONNECT route for a path with matching handler in the +// router with optional route-level middleware. +func (e *Echo) CONNECT(path string, h HandlerFunc, m ...MiddlewareFunc) { + e.add(CONNECT, path, h, m...) +} + +// DELETE registers a new DELETE route for a path with matching handler in the router +// with optional route-level middleware. +func (e *Echo) DELETE(path string, h HandlerFunc, m ...MiddlewareFunc) { + e.add(DELETE, path, h, m...) +} + +// GET registers a new GET route for a path with matching handler in the router +// with optional route-level middleware. +func (e *Echo) GET(path string, h HandlerFunc, m ...MiddlewareFunc) { + e.add(GET, path, h, m...) +} + +// HEAD registers a new HEAD route for a path with matching handler in the +// router with optional route-level middleware. +func (e *Echo) HEAD(path string, h HandlerFunc, m ...MiddlewareFunc) { + e.add(HEAD, path, h, m...) +} + +// OPTIONS registers a new OPTIONS route for a path with matching handler in the +// router with optional route-level middleware. +func (e *Echo) OPTIONS(path string, h HandlerFunc, m ...MiddlewareFunc) { + e.add(OPTIONS, path, h, m...) +} + +// PATCH registers a new PATCH route for a path with matching handler in the +// router with optional route-level middleware. +func (e *Echo) PATCH(path string, h HandlerFunc, m ...MiddlewareFunc) { + e.add(PATCH, path, h, m...) +} + +// POST registers a new POST route for a path with matching handler in the +// router with optional route-level middleware. +func (e *Echo) POST(path string, h HandlerFunc, m ...MiddlewareFunc) { + e.add(POST, path, h, m...) +} + +// PUT registers a new PUT route for a path with matching handler in the +// router with optional route-level middleware. +func (e *Echo) PUT(path string, h HandlerFunc, m ...MiddlewareFunc) { + e.add(PUT, path, h, m...) +} + +// TRACE registers a new TRACE route for a path with matching handler in the +// router with optional route-level middleware. +func (e *Echo) TRACE(path string, h HandlerFunc, m ...MiddlewareFunc) { + e.add(TRACE, path, h, m...) +} + +// Any registers a new route for all HTTP methods and path with matching handler +// in the router with optional route-level middleware. +func (e *Echo) Any(path string, handler HandlerFunc, middleware ...MiddlewareFunc) { + for _, m := range methods { + e.add(m, path, handler, middleware...) 
+ } +} + +// Match registers a new route for multiple HTTP methods and path with matching +// handler in the router with optional route-level middleware. +func (e *Echo) Match(methods []string, path string, handler HandlerFunc, middleware ...MiddlewareFunc) { + for _, m := range methods { + e.add(m, path, handler, middleware...) + } +} + +// Static registers a new route with path prefix to serve static files from the +// provided root directory. +func (e *Echo) Static(prefix, root string) { + if root == "" { + root = "." // For security we want to restrict to CWD. + } + static(e, prefix, root) +} + +func static(i i, prefix, root string) { + h := func(c Context) error { + p, err := PathUnescape(c.Param("*")) + if err != nil { + return err + } + name := filepath.Join(root, path.Clean("/"+p)) // "/"+ for security + return c.File(name) + } + i.GET(prefix, h) + if prefix == "/" { + i.GET(prefix+"*", h) + } else { + i.GET(prefix+"/*", h) + } +} + +// File registers a new route with path to serve a static file. +func (e *Echo) File(path, file string) { + e.GET(path, func(c Context) error { + return c.File(file) + }) +} + +func (e *Echo) add(method, path string, handler HandlerFunc, middleware ...MiddlewareFunc) { + name := handlerName(handler) + e.router.Add(method, path, func(c Context) error { + h := handler + // Chain middleware + for i := len(middleware) - 1; i >= 0; i-- { + h = middleware[i](h) + } + return h(c) + }) + r := &Route{ + Method: method, + Path: path, + Handler: name, + } + e.router.routes[method+path] = r +} + +// Group creates a new router group with prefix and optional group-level middleware. +func (e *Echo) Group(prefix string, m ...MiddlewareFunc) (g *Group) { + g = &Group{prefix: prefix, echo: e} + g.Use(m...) + return +} + +// URI generates a URI from handler. +func (e *Echo) URI(handler HandlerFunc, params ...interface{}) string { + uri := new(bytes.Buffer) + ln := len(params) + n := 0 + name := handlerName(handler) + for _, r := range e.router.routes { + if r.Handler == name { + for i, l := 0, len(r.Path); i < l; i++ { + if r.Path[i] == ':' && n < ln { + for ; i < l && r.Path[i] != '/'; i++ { + } + uri.WriteString(fmt.Sprintf("%v", params[n])) + n++ + } + if i < l { + uri.WriteByte(r.Path[i]) + } + } + break + } + } + return uri.String() +} + +// URL is an alias for `URI` function. +func (e *Echo) URL(h HandlerFunc, params ...interface{}) string { + return e.URI(h, params...) +} + +// Routes returns the registered routes. +func (e *Echo) Routes() []*Route { + routes := []*Route{} + for _, v := range e.router.routes { + routes = append(routes, v) + } + return routes +} + +// AcquireContext returns an empty `Context` instance from the pool. +// You must return the context by calling `ReleaseContext()`. +func (e *Echo) AcquireContext() Context { + return e.pool.Get().(Context) +} + +// ReleaseContext returns the `Context` instance back to the pool. +// You must call it after `AcquireContext()`. +func (e *Echo) ReleaseContext(c Context) { + e.pool.Put(c) +} + +// ServeHTTP implements `http.Handler` interface, which serves HTTP requests. 
+func (e *Echo) ServeHTTP(w http.ResponseWriter, r *http.Request) { + // Acquire lock + // e.Mutex.RLock() + // defer e.Mutex.RUnlock() + + // Acquire context + c := e.pool.Get().(*context) + defer e.pool.Put(c) + c.Reset(r, w) + + // Middleware + h := func(c Context) error { + method := r.Method + path := r.URL.RawPath + if path == "" { + path = r.URL.Path + } + e.router.Find(method, path, c) + h := c.Handler() + for i := len(e.middleware) - 1; i >= 0; i-- { + h = e.middleware[i](h) + } + return h(c) + } + + // Premiddleware + for i := len(e.premiddleware) - 1; i >= 0; i-- { + h = e.premiddleware[i](h) + } + + // Execute chain + if err := h(c); err != nil { + e.HTTPErrorHandler(err, c) + } +} + +// Start starts an HTTP server. +func (e *Echo) Start(address string) error { + e.Server.Addr = address + return e.StartServer(e.Server) +} + +// StartTLS starts an HTTPS server. +func (e *Echo) StartTLS(address string, certFile, keyFile string) (err error) { + if certFile == "" || keyFile == "" { + return errors.New("invalid tls configuration") + } + s := e.TLSServer + s.TLSConfig = new(tls.Config) + s.TLSConfig.Certificates = make([]tls.Certificate, 1) + s.TLSConfig.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return + } + return e.startTLS(address) +} + +// StartAutoTLS starts an HTTPS server using certificates automatically installed from https://letsencrypt.org. +func (e *Echo) StartAutoTLS(address string) error { + s := e.TLSServer + s.TLSConfig = new(tls.Config) + s.TLSConfig.GetCertificate = e.AutoTLSManager.GetCertificate + return e.startTLS(address) +} + +func (e *Echo) startTLS(address string) error { + s := e.TLSServer + s.Addr = address + if !e.DisableHTTP2 { + s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "h2") + } + return e.StartServer(e.TLSServer) +} + +// StartServer starts a custom http server. +func (e *Echo) StartServer(s *http.Server) (err error) { + // Setup + e.colorer.SetOutput(e.Logger.Output()) + s.ErrorLog = e.stdLogger + s.Handler = e + if e.Debug { + e.Logger.SetLevel(log.DEBUG) + } + + if !e.HideBanner { + e.colorer.Printf(banner, e.colorer.Red("v"+version), e.colorer.Blue(website)) + } + + if s.TLSConfig == nil { + if e.Listener == nil { + e.Listener, err = newListener(s.Addr) + if err != nil { + return err + } + } + if !e.HideBanner { + e.colorer.Printf("⇨ http server started on %s\n", e.colorer.Green(e.Listener.Addr())) + } + return s.Serve(e.Listener) + } + if e.TLSListener == nil { + l, err := newListener(s.Addr) + if err != nil { + return err + } + e.TLSListener = tls.NewListener(l, s.TLSConfig) + } + if !e.HideBanner { + e.colorer.Printf("⇨ https server started on %s\n", e.colorer.Green(e.TLSListener.Addr())) + } + return s.Serve(e.TLSListener) +} + +// NewHTTPError creates a new HTTPError instance. +func NewHTTPError(code int, message ...interface{}) *HTTPError { + he := &HTTPError{Code: code, Message: http.StatusText(code)} + if len(message) > 0 { + he.Message = message[0] + } + return he +} + +// Error makes it compatible with `error` interface. +func (he *HTTPError) Error() string { + return fmt.Sprintf("code=%d, message=%v", he.Code, he.Message) +} + +// WrapHandler wraps `http.Handler` into `echo.HandlerFunc`. 
+func WrapHandler(h http.Handler) HandlerFunc { + return func(c Context) error { + h.ServeHTTP(c.Response(), c.Request()) + return nil + } +} + +// WrapMiddleware wraps `func(http.Handler) http.Handler` into `echo.MiddlewareFunc` +func WrapMiddleware(m func(http.Handler) http.Handler) MiddlewareFunc { + return func(next HandlerFunc) HandlerFunc { + return func(c Context) (err error) { + m(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + c.SetRequest(r) + err = next(c) + })).ServeHTTP(c.Response(), c.Request()) + return + } + } +} + +func handlerName(h HandlerFunc) string { + t := reflect.ValueOf(h).Type() + if t.Kind() == reflect.Func { + return runtime.FuncForPC(reflect.ValueOf(h).Pointer()).Name() + } + return t.String() +} + +// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted +// connections. It's used by ListenAndServe and ListenAndServeTLS so +// dead TCP connections (e.g. closing laptop mid-download) eventually +// go away. +type tcpKeepAliveListener struct { + *net.TCPListener +} + +func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { + tc, err := ln.AcceptTCP() + if err != nil { + return + } + tc.SetKeepAlive(true) + tc.SetKeepAlivePeriod(3 * time.Minute) + return tc, nil +} + +func newListener(address string) (*tcpKeepAliveListener, error) { + l, err := net.Listen("tcp", address) + if err != nil { + return nil, err + } + return &tcpKeepAliveListener{l.(*net.TCPListener)}, nil +} diff --git a/vendor/github.com/labstack/echo/echo_go1.8.go b/vendor/github.com/labstack/echo/echo_go1.8.go new file mode 100644 index 0000000..340bed7 --- /dev/null +++ b/vendor/github.com/labstack/echo/echo_go1.8.go @@ -0,0 +1,25 @@ +// +build go1.8 + +package echo + +import ( + stdContext "context" +) + +// Close immediately stops the server. +// It internally calls `http.Server#Close()`. +func (e *Echo) Close() error { + if err := e.TLSServer.Close(); err != nil { + return err + } + return e.Server.Close() +} + +// Shutdown stops server the gracefully. +// It internally calls `http.Server#Shutdown()`. +func (e *Echo) Shutdown(ctx stdContext.Context) error { + if err := e.TLSServer.Shutdown(ctx); err != nil { + return err + } + return e.Server.Shutdown(ctx) +} diff --git a/vendor/github.com/labstack/echo/group.go b/vendor/github.com/labstack/echo/group.go new file mode 100644 index 0000000..799a8f9 --- /dev/null +++ b/vendor/github.com/labstack/echo/group.go @@ -0,0 +1,113 @@ +package echo + +import ( + "path" +) + +type ( + // Group is a set of sub-routes for a specified route. It can be used for inner + // routes that share a common middleware or functionality that should be separate + // from the parent echo instance while still inheriting from it. + Group struct { + prefix string + middleware []MiddlewareFunc + echo *Echo + } +) + +// Use implements `Echo#Use()` for sub-routes within the Group. +func (g *Group) Use(middleware ...MiddlewareFunc) { + g.middleware = append(g.middleware, middleware...) + // Allow all requests to reach the group as they might get dropped if router + // doesn't find a match, making none of the group middleware process. + g.echo.Any(path.Clean(g.prefix+"/*"), func(c Context) error { + return ErrNotFound + }, g.middleware...) +} + +// CONNECT implements `Echo#CONNECT()` for sub-routes within the Group. +func (g *Group) CONNECT(path string, h HandlerFunc, m ...MiddlewareFunc) { + g.add(CONNECT, path, h, m...) +} + +// DELETE implements `Echo#DELETE()` for sub-routes within the Group. 
+func (g *Group) DELETE(path string, h HandlerFunc, m ...MiddlewareFunc) { + g.add(DELETE, path, h, m...) +} + +// GET implements `Echo#GET()` for sub-routes within the Group. +func (g *Group) GET(path string, h HandlerFunc, m ...MiddlewareFunc) { + g.add(GET, path, h, m...) +} + +// HEAD implements `Echo#HEAD()` for sub-routes within the Group. +func (g *Group) HEAD(path string, h HandlerFunc, m ...MiddlewareFunc) { + g.add(HEAD, path, h, m...) +} + +// OPTIONS implements `Echo#OPTIONS()` for sub-routes within the Group. +func (g *Group) OPTIONS(path string, h HandlerFunc, m ...MiddlewareFunc) { + g.add(OPTIONS, path, h, m...) +} + +// PATCH implements `Echo#PATCH()` for sub-routes within the Group. +func (g *Group) PATCH(path string, h HandlerFunc, m ...MiddlewareFunc) { + g.add(PATCH, path, h, m...) +} + +// POST implements `Echo#POST()` for sub-routes within the Group. +func (g *Group) POST(path string, h HandlerFunc, m ...MiddlewareFunc) { + g.add(POST, path, h, m...) +} + +// PUT implements `Echo#PUT()` for sub-routes within the Group. +func (g *Group) PUT(path string, h HandlerFunc, m ...MiddlewareFunc) { + g.add(PUT, path, h, m...) +} + +// TRACE implements `Echo#TRACE()` for sub-routes within the Group. +func (g *Group) TRACE(path string, h HandlerFunc, m ...MiddlewareFunc) { + g.add(TRACE, path, h, m...) +} + +// Any implements `Echo#Any()` for sub-routes within the Group. +func (g *Group) Any(path string, handler HandlerFunc, middleware ...MiddlewareFunc) { + for _, m := range methods { + g.add(m, path, handler, middleware...) + } +} + +// Match implements `Echo#Match()` for sub-routes within the Group. +func (g *Group) Match(methods []string, path string, handler HandlerFunc, middleware ...MiddlewareFunc) { + for _, m := range methods { + g.add(m, path, handler, middleware...) + } +} + +// Group creates a new sub-group with prefix and optional sub-group-level middleware. +func (g *Group) Group(prefix string, middleware ...MiddlewareFunc) *Group { + m := []MiddlewareFunc{} + m = append(m, g.middleware...) + m = append(m, middleware...) + return g.echo.Group(g.prefix+prefix, m...) +} + +// Static implements `Echo#Static()` for sub-routes within the Group. +func (g *Group) Static(prefix, root string) { + static(g, prefix, root) +} + +// File implements `Echo#File()` for sub-routes within the Group. +func (g *Group) File(path, file string) { + g.echo.File(g.prefix+path, file) +} + +func (g *Group) add(method, path string, handler HandlerFunc, middleware ...MiddlewareFunc) { + // Combine into a new slice to avoid accidentally passing the same slice for + // multiple routes, which would lead to later add() calls overwriting the + // middleware from earlier calls. + m := []MiddlewareFunc{} + m = append(m, g.middleware...) + m = append(m, middleware...) + g.echo.add(method, g.prefix+path, handler, m...) +} diff --git a/vendor/github.com/labstack/echo/log.go b/vendor/github.com/labstack/echo/log.go new file mode 100644 index 0000000..b194c39 --- /dev/null +++ b/vendor/github.com/labstack/echo/log.go @@ -0,0 +1,40 @@ +package echo + +import ( + "io" + + "github.com/labstack/gommon/log" +) + +type ( + // Logger defines the logging interface. 
+ Logger interface { + Output() io.Writer + SetOutput(w io.Writer) + Prefix() string + SetPrefix(p string) + Level() log.Lvl + SetLevel(v log.Lvl) + Print(i ...interface{}) + Printf(format string, args ...interface{}) + Printj(j log.JSON) + Debug(i ...interface{}) + Debugf(format string, args ...interface{}) + Debugj(j log.JSON) + Info(i ...interface{}) + Infof(format string, args ...interface{}) + Infoj(j log.JSON) + Warn(i ...interface{}) + Warnf(format string, args ...interface{}) + Warnj(j log.JSON) + Error(i ...interface{}) + Errorf(format string, args ...interface{}) + Errorj(j log.JSON) + Fatal(i ...interface{}) + Fatalj(j log.JSON) + Fatalf(format string, args ...interface{}) + Panic(i ...interface{}) + Panicj(j log.JSON) + Panicf(format string, args ...interface{}) + } +) diff --git a/vendor/github.com/labstack/echo/response.go b/vendor/github.com/labstack/echo/response.go new file mode 100644 index 0000000..2c70d21 --- /dev/null +++ b/vendor/github.com/labstack/echo/response.go @@ -0,0 +1,89 @@ +package echo + +import ( + "bufio" + "net" + "net/http" +) + +type ( + // Response wraps an http.ResponseWriter and implements its interface to be used + // by an HTTP handler to construct an HTTP response. + // See: https://golang.org/pkg/net/http/#ResponseWriter + Response struct { + Writer http.ResponseWriter + Status int + Size int64 + Committed bool + echo *Echo + } +) + +// NewResponse creates a new instance of Response. +func NewResponse(w http.ResponseWriter, e *Echo) (r *Response) { + return &Response{Writer: w, echo: e} +} + +// Header returns the header map for the writer that will be sent by +// WriteHeader. Changing the header after a call to WriteHeader (or Write) has +// no effect unless the modified headers were declared as trailers by setting +// the "Trailer" header before the call to WriteHeader (see example) +// To suppress implicit response headers, set their value to nil. +// Example: https://golang.org/pkg/net/http/#example_ResponseWriter_trailers +func (r *Response) Header() http.Header { + return r.Writer.Header() +} + +// WriteHeader sends an HTTP response header with status code. If WriteHeader is +// not called explicitly, the first call to Write will trigger an implicit +// WriteHeader(http.StatusOK). Thus explicit calls to WriteHeader are mainly +// used to send error codes. +func (r *Response) WriteHeader(code int) { + if r.Committed { + r.echo.Logger.Warn("response already committed") + return + } + r.Status = code + r.Writer.WriteHeader(code) + r.Committed = true +} + +// Write writes the data to the connection as part of an HTTP reply. +func (r *Response) Write(b []byte) (n int, err error) { + if !r.Committed { + r.WriteHeader(http.StatusOK) + } + n, err = r.Writer.Write(b) + r.Size += int64(n) + return +} + +// Flush implements the http.Flusher interface to allow an HTTP handler to flush +// buffered data to the client. +// See [http.Flusher](https://golang.org/pkg/net/http/#Flusher) +func (r *Response) Flush() { + r.Writer.(http.Flusher).Flush() +} + +// Hijack implements the http.Hijacker interface to allow an HTTP handler to +// take over the connection. +// See [http.Hijacker](https://golang.org/pkg/net/http/#Hijacker) +func (r *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return r.Writer.(http.Hijacker).Hijack() +} + +// CloseNotify implements the http.CloseNotifier interface to allow detecting +// when the underlying connection has gone away. 
+// This mechanism can be used to cancel long operations on the server if the +// client has disconnected before the response is ready. +// See [http.CloseNotifier](https://golang.org/pkg/net/http/#CloseNotifier) +func (r *Response) CloseNotify() <-chan bool { + return r.Writer.(http.CloseNotifier).CloseNotify() +} + +func (r *Response) reset(w http.ResponseWriter) { + r.Writer = w + r.Size = 0 + r.Status = http.StatusOK + r.Committed = false +} diff --git a/vendor/github.com/labstack/echo/router.go b/vendor/github.com/labstack/echo/router.go new file mode 100644 index 0000000..626fe22 --- /dev/null +++ b/vendor/github.com/labstack/echo/router.go @@ -0,0 +1,437 @@ +package echo + +import "strings" + +type ( + // Router is the registry of all registered routes for an `Echo` instance for + // request matching and URL path parameter parsing. + Router struct { + tree *node + routes map[string]*Route + echo *Echo + } + node struct { + kind kind + label byte + prefix string + parent *node + children children + ppath string + pnames []string + methodHandler *methodHandler + } + kind uint8 + children []*node + methodHandler struct { + connect HandlerFunc + delete HandlerFunc + get HandlerFunc + head HandlerFunc + options HandlerFunc + patch HandlerFunc + post HandlerFunc + put HandlerFunc + trace HandlerFunc + } +) + +const ( + skind kind = iota + pkind + akind +) + +// NewRouter returns a new Router instance. +func NewRouter(e *Echo) *Router { + return &Router{ + tree: &node{ + methodHandler: new(methodHandler), + }, + routes: map[string]*Route{}, + echo: e, + } +} + +// Add registers a new route for method and path with matching handler. +func (r *Router) Add(method, path string, h HandlerFunc) { + // Validate path + if path == "" { + panic("echo: path cannot be empty") + } + if path[0] != '/' { + path = "/" + path + } + ppath := path // Pristine path + pnames := []string{} // Param names + + for i, l := 0, len(path); i < l; i++ { + if path[i] == ':' { + j := i + 1 + + r.insert(method, path[:i], nil, skind, "", nil) + for ; i < l && path[i] != '/'; i++ { + } + + pnames = append(pnames, path[j:i]) + path = path[:j] + path[i:] + i, l = j, len(path) + + if i == l { + r.insert(method, path[:i], h, pkind, ppath, pnames) + return + } + r.insert(method, path[:i], nil, pkind, ppath, pnames) + } else if path[i] == '*' { + r.insert(method, path[:i], nil, skind, "", nil) + pnames = append(pnames, "*") + r.insert(method, path[:i+1], h, akind, ppath, pnames) + return + } + } + + r.insert(method, path, h, skind, ppath, pnames) +} + +func (r *Router) insert(method, path string, h HandlerFunc, t kind, ppath string, pnames []string) { + // Adjust max param + l := len(pnames) + if *r.echo.maxParam < l { + *r.echo.maxParam = l + } + + cn := r.tree // Current node as root + if cn == nil { + panic("echo: invalid method") + } + search := path + + for { + sl := len(search) + pl := len(cn.prefix) + l := 0 + + // LCP + max := pl + if sl < max { + max = sl + } + for ; l < max && search[l] == cn.prefix[l]; l++ { + } + + if l == 0 { + // At root node + cn.label = search[0] + cn.prefix = search + if h != nil { + cn.kind = t + cn.addHandler(method, h) + cn.ppath = ppath + cn.pnames = pnames + } + } else if l < pl { + // Split node + n := newNode(cn.kind, cn.prefix[l:], cn, cn.children, cn.methodHandler, cn.ppath, cn.pnames) + + // Reset parent node + cn.kind = skind + cn.label = cn.prefix[0] + cn.prefix = cn.prefix[:l] + cn.children = nil + cn.methodHandler = new(methodHandler) + cn.ppath = "" + cn.pnames = nil + + 
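+			// Attach the split-off node (which kept the original node's
+			// handlers, children and param names) under the now-truncated
+			// parent prefix.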
cn.addChild(n) + + if l == sl { + // At parent node + cn.kind = t + cn.addHandler(method, h) + cn.ppath = ppath + cn.pnames = pnames + } else { + // Create child node + n = newNode(t, search[l:], cn, nil, new(methodHandler), ppath, pnames) + n.addHandler(method, h) + cn.addChild(n) + } + } else if l < sl { + search = search[l:] + c := cn.findChildWithLabel(search[0]) + if c != nil { + // Go deeper + cn = c + continue + } + // Create child node + n := newNode(t, search, cn, nil, new(methodHandler), ppath, pnames) + n.addHandler(method, h) + cn.addChild(n) + } else { + // Node already exists + if h != nil { + cn.addHandler(method, h) + cn.ppath = ppath + if len(cn.pnames) == 0 { // Issue #729 + cn.pnames = pnames + } + for i, n := range pnames { + // Param name aliases + if i < len(cn.pnames) && !strings.Contains(cn.pnames[i], n) { + cn.pnames[i] += "," + n + } + } + } + } + return + } +} + +func newNode(t kind, pre string, p *node, c children, mh *methodHandler, ppath string, pnames []string) *node { + return &node{ + kind: t, + label: pre[0], + prefix: pre, + parent: p, + children: c, + ppath: ppath, + pnames: pnames, + methodHandler: mh, + } +} + +func (n *node) addChild(c *node) { + n.children = append(n.children, c) +} + +func (n *node) findChild(l byte, t kind) *node { + for _, c := range n.children { + if c.label == l && c.kind == t { + return c + } + } + return nil +} + +func (n *node) findChildWithLabel(l byte) *node { + for _, c := range n.children { + if c.label == l { + return c + } + } + return nil +} + +func (n *node) findChildByKind(t kind) *node { + for _, c := range n.children { + if c.kind == t { + return c + } + } + return nil +} + +func (n *node) addHandler(method string, h HandlerFunc) { + switch method { + case GET: + n.methodHandler.get = h + case POST: + n.methodHandler.post = h + case PUT: + n.methodHandler.put = h + case DELETE: + n.methodHandler.delete = h + case PATCH: + n.methodHandler.patch = h + case OPTIONS: + n.methodHandler.options = h + case HEAD: + n.methodHandler.head = h + case CONNECT: + n.methodHandler.connect = h + case TRACE: + n.methodHandler.trace = h + } +} + +func (n *node) findHandler(method string) HandlerFunc { + switch method { + case GET: + return n.methodHandler.get + case POST: + return n.methodHandler.post + case PUT: + return n.methodHandler.put + case DELETE: + return n.methodHandler.delete + case PATCH: + return n.methodHandler.patch + case OPTIONS: + return n.methodHandler.options + case HEAD: + return n.methodHandler.head + case CONNECT: + return n.methodHandler.connect + case TRACE: + return n.methodHandler.trace + default: + return nil + } +} + +func (n *node) checkMethodNotAllowed() HandlerFunc { + for _, m := range methods { + if h := n.findHandler(m); h != nil { + return MethodNotAllowedHandler + } + } + return NotFoundHandler +} + +// Find lookup a handler registered for method and path. It also parses URL for path +// parameters and load them into context. +// +// For performance: +// +// - Get context from `Echo#AcquireContext()` +// - Reset it `Context#Reset()` +// - Return it `Echo#ReleaseContext()`. 
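+//
+// Illustrative sketch of that pattern (not part of the upstream
+// documentation); req and resp are placeholders for an *http.Request and an
+// http.ResponseWriter:
+//
+//	c := e.AcquireContext()
+//	c.Reset(req, resp)
+//	e.Router().Find(http.MethodGet, "/users/1", c)
+//	h := c.Handler()
+//	_ = h(c)
+//	e.ReleaseContext(c)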
+func (r *Router) Find(method, path string, c Context) { + ctx := c.(*context) + ctx.path = path + cn := r.tree // Current node as root + + var ( + search = path + child *node // Child node + n int // Param counter + nk kind // Next kind + nn *node // Next node + ns string // Next search + pvalues = ctx.pvalues // Use the internal slice so the interface can keep the illusion of a dynamic slice + ) + + // Search order static > param > any + for { + if search == "" { + goto End + } + + pl := 0 // Prefix length + l := 0 // LCP length + + if cn.label != ':' { + sl := len(search) + pl = len(cn.prefix) + + // LCP + max := pl + if sl < max { + max = sl + } + for ; l < max && search[l] == cn.prefix[l]; l++ { + } + } + + if l == pl { + // Continue search + search = search[l:] + } else { + cn = nn + search = ns + if nk == pkind { + goto Param + } else if nk == akind { + goto Any + } + // Not found + return + } + + if search == "" { + goto End + } + + // Static node + if child = cn.findChild(search[0], skind); child != nil { + // Save next + if cn.prefix[len(cn.prefix)-1] == '/' { // Issue #623 + nk = pkind + nn = cn + ns = search + } + cn = child + continue + } + + // Param node + Param: + if child = cn.findChildByKind(pkind); child != nil { + // Issue #378 + if len(pvalues) == n { + continue + } + + // Save next + if cn.prefix[len(cn.prefix)-1] == '/' { // Issue #623 + nk = akind + nn = cn + ns = search + } + + cn = child + i, l := 0, len(search) + for ; i < l && search[i] != '/'; i++ { + } + pvalues[n] = search[:i] + n++ + search = search[i:] + continue + } + + // Any node + Any: + if cn = cn.findChildByKind(akind); cn == nil { + if nn != nil { + cn = nn + nn = cn.parent // Next (Issue #954) + search = ns + if nk == pkind { + goto Param + } else if nk == akind { + goto Any + } + } + // Not found + return + } + pvalues[len(cn.pnames)-1] = search + goto End + } + +End: + ctx.handler = cn.findHandler(method) + ctx.path = cn.ppath + ctx.pnames = cn.pnames + + // NOTE: Slow zone... + if ctx.handler == nil { + ctx.handler = cn.checkMethodNotAllowed() + + // Dig further for any, might have an empty value for *, e.g. + // serving a directory. Issue #207. 
+ if cn = cn.findChildByKind(akind); cn == nil { + return + } + if h := cn.findHandler(method); h != nil { + ctx.handler = h + } else { + ctx.handler = cn.checkMethodNotAllowed() + } + ctx.path = cn.ppath + ctx.pnames = cn.pnames + pvalues[len(cn.pnames)-1] = "" + } + + return +} diff --git a/vendor/github.com/labstack/echo/util_go17.go b/vendor/github.com/labstack/echo/util_go17.go new file mode 100644 index 0000000..6b5d6b0 --- /dev/null +++ b/vendor/github.com/labstack/echo/util_go17.go @@ -0,0 +1,12 @@ +// +build go1.7,!go1.8 + +package echo + +import ( + "net/url" +) + +// PathUnescape is wraps `url.QueryUnescape` +func PathUnescape(s string) (string, error) { + return url.QueryUnescape(s) +} diff --git a/vendor/github.com/labstack/echo/util_go18.go b/vendor/github.com/labstack/echo/util_go18.go new file mode 100644 index 0000000..8a37785 --- /dev/null +++ b/vendor/github.com/labstack/echo/util_go18.go @@ -0,0 +1,10 @@ +// +build go1.8 + +package echo + +import "net/url" + +// PathUnescape is wraps `url.PathUnescape` +func PathUnescape(s string) (string, error) { + return url.PathUnescape(s) +} diff --git a/vendor/github.com/labstack/gommon/LICENSE b/vendor/github.com/labstack/gommon/LICENSE new file mode 100644 index 0000000..d2ae3ed --- /dev/null +++ b/vendor/github.com/labstack/gommon/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 labstack + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/labstack/gommon/color/README.md b/vendor/github.com/labstack/gommon/color/README.md new file mode 100644 index 0000000..297c351 --- /dev/null +++ b/vendor/github.com/labstack/gommon/color/README.md @@ -0,0 +1,86 @@ +# Color + +Style terminal text. + +## Installation + +```sh +go get github.com/labstack/gommon/color +``` + +## Windows? 
+ +Try [cmder](http://bliker.github.io/cmder) or https://github.com/mattn/go-colorable + +## [Usage](https://github.com/labstack/gommon/blob/master/color/color_test.go) + +```sh +import github.com/labstack/gommon/color +``` + +### Colored text + +```go +color.Println(color.Black("black")) +color.Println(color.Red("red")) +color.Println(color.Green("green")) +color.Println(color.Yellow("yellow")) +color.Println(color.Blue("blue")) +color.Println(color.Magenta("magenta")) +color.Println(color.Cyan("cyan")) +color.Println(color.White("white")) +color.Println(color.Grey("grey")) +``` +![Colored Text](http://i.imgur.com/8RtY1QR.png) + +### Colored background + +```go +color.Println(color.BlackBg("black background", color.Wht)) +color.Println(color.RedBg("red background")) +color.Println(color.GreenBg("green background")) +color.Println(color.YellowBg("yellow background")) +color.Println(color.BlueBg("blue background")) +color.Println(color.MagentaBg("magenta background")) +color.Println(color.CyanBg("cyan background")) +color.Println(color.WhiteBg("white background")) +``` +![Colored Background](http://i.imgur.com/SrrS6lw.png) + +### Emphasis + +```go +color.Println(color.Bold("bold")) +color.Println(color.Dim("dim")) +color.Println(color.Italic("italic")) +color.Println(color.Underline("underline")) +color.Println(color.Inverse("inverse")) +color.Println(color.Hidden("hidden")) +color.Println(color.Strikeout("strikeout")) +``` +![Emphasis](http://i.imgur.com/3RSJBbc.png) + +### Mix and match + +```go +color.Println(color.Green("bold green with white background", color.B, color.WhtBg)) +color.Println(color.Red("underline red", color.U)) +color.Println(color.Yellow("dim yellow", color.D)) +color.Println(color.Cyan("inverse cyan", color.In)) +color.Println(color.Blue("bold underline dim blue", color.B, color.U, color.D)) +``` +![Mix and match](http://i.imgur.com/jWGq9Ca.png) + +### Enable/Disable the package + +```go +color.Disable() +color.Enable() +``` + +### New instance + +```go +c := New() +c.Green("green") +``` diff --git a/vendor/github.com/labstack/gommon/color/color.go b/vendor/github.com/labstack/gommon/color/color.go new file mode 100644 index 0000000..4131dcf --- /dev/null +++ b/vendor/github.com/labstack/gommon/color/color.go @@ -0,0 +1,407 @@ +package color + +import ( + "bytes" + "fmt" + "io" + "os" + + "github.com/mattn/go-colorable" + "github.com/mattn/go-isatty" +) + +type ( + inner func(interface{}, []string, *Color) string +) + +// Color styles +const ( + // Blk Black text style + Blk = "30" + // Rd red text style + Rd = "31" + // Grn green text style + Grn = "32" + // Yel yellow text style + Yel = "33" + // Blu blue text style + Blu = "34" + // Mgn magenta text style + Mgn = "35" + // Cyn cyan text style + Cyn = "36" + // Wht white text style + Wht = "37" + // Gry grey text style + Gry = "90" + + // BlkBg black background style + BlkBg = "40" + // RdBg red background style + RdBg = "41" + // GrnBg green background style + GrnBg = "42" + // YelBg yellow background style + YelBg = "43" + // BluBg blue background style + BluBg = "44" + // MgnBg magenta background style + MgnBg = "45" + // CynBg cyan background style + CynBg = "46" + // WhtBg white background style + WhtBg = "47" + + // R reset emphasis style + R = "0" + // B bold emphasis style + B = "1" + // D dim emphasis style + D = "2" + // I italic emphasis style + I = "3" + // U underline emphasis style + U = "4" + // In inverse emphasis style + In = "7" + // H hidden emphasis style + H = "8" + // S strikeout emphasis 
style + S = "9" +) + +var ( + black = outer(Blk) + red = outer(Rd) + green = outer(Grn) + yellow = outer(Yel) + blue = outer(Blu) + magenta = outer(Mgn) + cyan = outer(Cyn) + white = outer(Wht) + grey = outer(Gry) + + blackBg = outer(BlkBg) + redBg = outer(RdBg) + greenBg = outer(GrnBg) + yellowBg = outer(YelBg) + blueBg = outer(BluBg) + magentaBg = outer(MgnBg) + cyanBg = outer(CynBg) + whiteBg = outer(WhtBg) + + reset = outer(R) + bold = outer(B) + dim = outer(D) + italic = outer(I) + underline = outer(U) + inverse = outer(In) + hidden = outer(H) + strikeout = outer(S) + + global = New() +) + +func outer(n string) inner { + return func(msg interface{}, styles []string, c *Color) string { + // TODO: Drop fmt to boost performance? + if c.disabled { + return fmt.Sprintf("%v", msg) + } + + b := new(bytes.Buffer) + b.WriteString("\x1b[") + b.WriteString(n) + for _, s := range styles { + b.WriteString(";") + b.WriteString(s) + } + b.WriteString("m") + return fmt.Sprintf("%s%v\x1b[0m", b.String(), msg) + } +} + +type ( + Color struct { + output io.Writer + disabled bool + } +) + +// New creates a Color instance. +func New() (c *Color) { + c = new(Color) + c.SetOutput(colorable.NewColorableStdout()) + return +} + +// Output returns the output. +func (c *Color) Output() io.Writer { + return c.output +} + +// SetOutput sets the output. +func (c *Color) SetOutput(w io.Writer) { + c.output = w + if w, ok := w.(*os.File); !ok || !isatty.IsTerminal(w.Fd()) { + c.disabled = true + } +} + +// Disable disables the colors and styles. +func (c *Color) Disable() { + c.disabled = true +} + +// Enable enables the colors and styles. +func (c *Color) Enable() { + c.disabled = false +} + +// Print is analogous to `fmt.Print` with termial detection. +func (c *Color) Print(args ...interface{}) { + fmt.Fprint(c.output, args...) +} + +// Println is analogous to `fmt.Println` with termial detection. +func (c *Color) Println(args ...interface{}) { + fmt.Fprintln(c.output, args...) +} + +// Printf is analogous to `fmt.Printf` with termial detection. +func (c *Color) Printf(format string, args ...interface{}) { + fmt.Fprintf(c.output, format, args...) 
+} + +func (c *Color) Black(msg interface{}, styles ...string) string { + return black(msg, styles, c) +} + +func (c *Color) Red(msg interface{}, styles ...string) string { + return red(msg, styles, c) +} + +func (c *Color) Green(msg interface{}, styles ...string) string { + return green(msg, styles, c) +} + +func (c *Color) Yellow(msg interface{}, styles ...string) string { + return yellow(msg, styles, c) +} + +func (c *Color) Blue(msg interface{}, styles ...string) string { + return blue(msg, styles, c) +} + +func (c *Color) Magenta(msg interface{}, styles ...string) string { + return magenta(msg, styles, c) +} + +func (c *Color) Cyan(msg interface{}, styles ...string) string { + return cyan(msg, styles, c) +} + +func (c *Color) White(msg interface{}, styles ...string) string { + return white(msg, styles, c) +} + +func (c *Color) Grey(msg interface{}, styles ...string) string { + return grey(msg, styles, c) +} + +func (c *Color) BlackBg(msg interface{}, styles ...string) string { + return blackBg(msg, styles, c) +} + +func (c *Color) RedBg(msg interface{}, styles ...string) string { + return redBg(msg, styles, c) +} + +func (c *Color) GreenBg(msg interface{}, styles ...string) string { + return greenBg(msg, styles, c) +} + +func (c *Color) YellowBg(msg interface{}, styles ...string) string { + return yellowBg(msg, styles, c) +} + +func (c *Color) BlueBg(msg interface{}, styles ...string) string { + return blueBg(msg, styles, c) +} + +func (c *Color) MagentaBg(msg interface{}, styles ...string) string { + return magentaBg(msg, styles, c) +} + +func (c *Color) CyanBg(msg interface{}, styles ...string) string { + return cyanBg(msg, styles, c) +} + +func (c *Color) WhiteBg(msg interface{}, styles ...string) string { + return whiteBg(msg, styles, c) +} + +func (c *Color) Reset(msg interface{}, styles ...string) string { + return reset(msg, styles, c) +} + +func (c *Color) Bold(msg interface{}, styles ...string) string { + return bold(msg, styles, c) +} + +func (c *Color) Dim(msg interface{}, styles ...string) string { + return dim(msg, styles, c) +} + +func (c *Color) Italic(msg interface{}, styles ...string) string { + return italic(msg, styles, c) +} + +func (c *Color) Underline(msg interface{}, styles ...string) string { + return underline(msg, styles, c) +} + +func (c *Color) Inverse(msg interface{}, styles ...string) string { + return inverse(msg, styles, c) +} + +func (c *Color) Hidden(msg interface{}, styles ...string) string { + return hidden(msg, styles, c) +} + +func (c *Color) Strikeout(msg interface{}, styles ...string) string { + return strikeout(msg, styles, c) +} + +// Output returns the output. +func Output() io.Writer { + return global.output +} + +// SetOutput sets the output. +func SetOutput(w io.Writer) { + global.SetOutput(w) +} + +func Disable() { + global.Disable() +} + +func Enable() { + global.Enable() +} + +// Print is analogous to `fmt.Print` with termial detection. +func Print(args ...interface{}) { + global.Print(args...) +} + +// Println is analogous to `fmt.Println` with termial detection. +func Println(args ...interface{}) { + global.Println(args...) +} + +// Printf is analogous to `fmt.Printf` with termial detection. +func Printf(format string, args ...interface{}) { + global.Printf(format, args...) +} + +func Black(msg interface{}, styles ...string) string { + return global.Black(msg, styles...) +} + +func Red(msg interface{}, styles ...string) string { + return global.Red(msg, styles...) 
+} + +func Green(msg interface{}, styles ...string) string { + return global.Green(msg, styles...) +} + +func Yellow(msg interface{}, styles ...string) string { + return global.Yellow(msg, styles...) +} + +func Blue(msg interface{}, styles ...string) string { + return global.Blue(msg, styles...) +} + +func Magenta(msg interface{}, styles ...string) string { + return global.Magenta(msg, styles...) +} + +func Cyan(msg interface{}, styles ...string) string { + return global.Cyan(msg, styles...) +} + +func White(msg interface{}, styles ...string) string { + return global.White(msg, styles...) +} + +func Grey(msg interface{}, styles ...string) string { + return global.Grey(msg, styles...) +} + +func BlackBg(msg interface{}, styles ...string) string { + return global.BlackBg(msg, styles...) +} + +func RedBg(msg interface{}, styles ...string) string { + return global.RedBg(msg, styles...) +} + +func GreenBg(msg interface{}, styles ...string) string { + return global.GreenBg(msg, styles...) +} + +func YellowBg(msg interface{}, styles ...string) string { + return global.YellowBg(msg, styles...) +} + +func BlueBg(msg interface{}, styles ...string) string { + return global.BlueBg(msg, styles...) +} + +func MagentaBg(msg interface{}, styles ...string) string { + return global.MagentaBg(msg, styles...) +} + +func CyanBg(msg interface{}, styles ...string) string { + return global.CyanBg(msg, styles...) +} + +func WhiteBg(msg interface{}, styles ...string) string { + return global.WhiteBg(msg, styles...) +} + +func Reset(msg interface{}, styles ...string) string { + return global.Reset(msg, styles...) +} + +func Bold(msg interface{}, styles ...string) string { + return global.Bold(msg, styles...) +} + +func Dim(msg interface{}, styles ...string) string { + return global.Dim(msg, styles...) +} + +func Italic(msg interface{}, styles ...string) string { + return global.Italic(msg, styles...) +} + +func Underline(msg interface{}, styles ...string) string { + return global.Underline(msg, styles...) +} + +func Inverse(msg interface{}, styles ...string) string { + return global.Inverse(msg, styles...) +} + +func Hidden(msg interface{}, styles ...string) string { + return global.Hidden(msg, styles...) +} + +func Strikeout(msg interface{}, styles ...string) string { + return global.Strikeout(msg, styles...) 
+} diff --git a/vendor/github.com/labstack/gommon/log/README.md b/vendor/github.com/labstack/gommon/log/README.md new file mode 100644 index 0000000..d5b9e9f --- /dev/null +++ b/vendor/github.com/labstack/gommon/log/README.md @@ -0,0 +1,5 @@ +## WORK IN PROGRESS + +### Usage + +[log_test.go](log_test.go) diff --git a/vendor/github.com/labstack/gommon/log/color.go b/vendor/github.com/labstack/gommon/log/color.go new file mode 100644 index 0000000..7351b39 --- /dev/null +++ b/vendor/github.com/labstack/gommon/log/color.go @@ -0,0 +1,13 @@ +// +build !appengine + +package log + +import ( + "io" + + "github.com/mattn/go-colorable" +) + +func output() io.Writer { + return colorable.NewColorableStdout() +} diff --git a/vendor/github.com/labstack/gommon/log/log.go b/vendor/github.com/labstack/gommon/log/log.go new file mode 100644 index 0000000..0d77a87 --- /dev/null +++ b/vendor/github.com/labstack/gommon/log/log.go @@ -0,0 +1,415 @@ +package log + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "path" + "runtime" + "strconv" + "sync" + "time" + + "github.com/mattn/go-isatty" + "github.com/valyala/fasttemplate" + + "github.com/labstack/gommon/color" +) + +type ( + Logger struct { + prefix string + level Lvl + skip int + output io.Writer + template *fasttemplate.Template + levels []string + color *color.Color + bufferPool sync.Pool + mutex sync.Mutex + } + + Lvl uint8 + + JSON map[string]interface{} +) + +const ( + DEBUG Lvl = iota + 1 + INFO + WARN + ERROR + OFF + panicLevel + fatalLevel +) + +var ( + global = New("-") + defaultHeader = `{"time":"${time_rfc3339_nano}","level":"${level}","prefix":"${prefix}",` + + `"file":"${short_file}","line":"${line}"}` +) + +func init() { + global.skip = 3 +} + +func New(prefix string) (l *Logger) { + l = &Logger{ + level: INFO, + skip: 2, + prefix: prefix, + template: l.newTemplate(defaultHeader), + color: color.New(), + bufferPool: sync.Pool{ + New: func() interface{} { + return bytes.NewBuffer(make([]byte, 256)) + }, + }, + } + l.initLevels() + l.SetOutput(output()) + return +} + +func (l *Logger) initLevels() { + l.levels = []string{ + "-", + l.color.Blue("DEBUG"), + l.color.Green("INFO"), + l.color.Yellow("WARN"), + l.color.Red("ERROR"), + "", + l.color.Yellow("PANIC", color.U), + l.color.Red("FATAL", color.U), + } +} + +func (l *Logger) newTemplate(format string) *fasttemplate.Template { + return fasttemplate.New(format, "${", "}") +} + +func (l *Logger) DisableColor() { + l.color.Disable() + l.initLevels() +} + +func (l *Logger) EnableColor() { + l.color.Enable() + l.initLevels() +} + +func (l *Logger) Prefix() string { + return l.prefix +} + +func (l *Logger) SetPrefix(p string) { + l.prefix = p +} + +func (l *Logger) Level() Lvl { + return l.level +} + +func (l *Logger) SetLevel(v Lvl) { + l.level = v +} + +func (l *Logger) Output() io.Writer { + return l.output +} + +func (l *Logger) SetOutput(w io.Writer) { + l.output = w + if w, ok := w.(*os.File); !ok || !isatty.IsTerminal(w.Fd()) { + l.DisableColor() + } +} + +func (l *Logger) Color() *color.Color { + return l.color +} + +func (l *Logger) SetHeader(h string) { + l.template = l.newTemplate(h) +} + +func (l *Logger) Print(i ...interface{}) { + l.log(0, "", i...) + // fmt.Fprintln(l.output, i...) +} + +func (l *Logger) Printf(format string, args ...interface{}) { + l.log(0, format, args...) +} + +func (l *Logger) Printj(j JSON) { + l.log(0, "json", j) +} + +func (l *Logger) Debug(i ...interface{}) { + l.log(DEBUG, "", i...) 
+} + +func (l *Logger) Debugf(format string, args ...interface{}) { + l.log(DEBUG, format, args...) +} + +func (l *Logger) Debugj(j JSON) { + l.log(DEBUG, "json", j) +} + +func (l *Logger) Info(i ...interface{}) { + l.log(INFO, "", i...) +} + +func (l *Logger) Infof(format string, args ...interface{}) { + l.log(INFO, format, args...) +} + +func (l *Logger) Infoj(j JSON) { + l.log(INFO, "json", j) +} + +func (l *Logger) Warn(i ...interface{}) { + l.log(WARN, "", i...) +} + +func (l *Logger) Warnf(format string, args ...interface{}) { + l.log(WARN, format, args...) +} + +func (l *Logger) Warnj(j JSON) { + l.log(WARN, "json", j) +} + +func (l *Logger) Error(i ...interface{}) { + l.log(ERROR, "", i...) +} + +func (l *Logger) Errorf(format string, args ...interface{}) { + l.log(ERROR, format, args...) +} + +func (l *Logger) Errorj(j JSON) { + l.log(ERROR, "json", j) +} + +func (l *Logger) Fatal(i ...interface{}) { + l.log(fatalLevel, "", i...) + os.Exit(1) +} + +func (l *Logger) Fatalf(format string, args ...interface{}) { + l.log(fatalLevel, format, args...) + os.Exit(1) +} + +func (l *Logger) Fatalj(j JSON) { + l.log(fatalLevel, "json", j) + os.Exit(1) +} + +func (l *Logger) Panic(i ...interface{}) { + l.log(panicLevel, "", i...) + panic(fmt.Sprint(i...)) +} + +func (l *Logger) Panicf(format string, args ...interface{}) { + l.log(panicLevel, format, args...) + panic(fmt.Sprintf(format, args)) +} + +func (l *Logger) Panicj(j JSON) { + l.log(panicLevel, "json", j) + panic(j) +} + +func DisableColor() { + global.DisableColor() +} + +func EnableColor() { + global.EnableColor() +} + +func Prefix() string { + return global.Prefix() +} + +func SetPrefix(p string) { + global.SetPrefix(p) +} + +func Level() Lvl { + return global.Level() +} + +func SetLevel(v Lvl) { + global.SetLevel(v) +} + +func Output() io.Writer { + return global.Output() +} + +func SetOutput(w io.Writer) { + global.SetOutput(w) +} + +func SetHeader(h string) { + global.SetHeader(h) +} + +func Print(i ...interface{}) { + global.Print(i...) +} + +func Printf(format string, args ...interface{}) { + global.Printf(format, args...) +} + +func Printj(j JSON) { + global.Printj(j) +} + +func Debug(i ...interface{}) { + global.Debug(i...) +} + +func Debugf(format string, args ...interface{}) { + global.Debugf(format, args...) +} + +func Debugj(j JSON) { + global.Debugj(j) +} + +func Info(i ...interface{}) { + global.Info(i...) +} + +func Infof(format string, args ...interface{}) { + global.Infof(format, args...) +} + +func Infoj(j JSON) { + global.Infoj(j) +} + +func Warn(i ...interface{}) { + global.Warn(i...) +} + +func Warnf(format string, args ...interface{}) { + global.Warnf(format, args...) +} + +func Warnj(j JSON) { + global.Warnj(j) +} + +func Error(i ...interface{}) { + global.Error(i...) +} + +func Errorf(format string, args ...interface{}) { + global.Errorf(format, args...) +} + +func Errorj(j JSON) { + global.Errorj(j) +} + +func Fatal(i ...interface{}) { + global.Fatal(i...) +} + +func Fatalf(format string, args ...interface{}) { + global.Fatalf(format, args...) +} + +func Fatalj(j JSON) { + global.Fatalj(j) +} + +func Panic(i ...interface{}) { + global.Panic(i...) +} + +func Panicf(format string, args ...interface{}) { + global.Panicf(format, args...) 
+} + +func Panicj(j JSON) { + global.Panicj(j) +} + +func (l *Logger) log(v Lvl, format string, args ...interface{}) { + l.mutex.Lock() + defer l.mutex.Unlock() + buf := l.bufferPool.Get().(*bytes.Buffer) + buf.Reset() + defer l.bufferPool.Put(buf) + _, file, line, _ := runtime.Caller(l.skip) + + if v >= l.level || v == 0 { + message := "" + if format == "" { + message = fmt.Sprint(args...) + } else if format == "json" { + b, err := json.Marshal(args[0]) + if err != nil { + panic(err) + } + message = string(b) + } else { + message = fmt.Sprintf(format, args...) + } + + _, err := l.template.ExecuteFunc(buf, func(w io.Writer, tag string) (int, error) { + switch tag { + case "time_rfc3339": + return w.Write([]byte(time.Now().Format(time.RFC3339))) + case "time_rfc3339_nano": + return w.Write([]byte(time.Now().Format(time.RFC3339Nano))) + case "level": + return w.Write([]byte(l.levels[v])) + case "prefix": + return w.Write([]byte(l.prefix)) + case "long_file": + return w.Write([]byte(file)) + case "short_file": + return w.Write([]byte(path.Base(file))) + case "line": + return w.Write([]byte(strconv.Itoa(line))) + } + return 0, nil + }) + + if err == nil { + s := buf.String() + i := buf.Len() - 1 + if s[i] == '}' { + // JSON header + buf.Truncate(i) + buf.WriteByte(',') + if format == "json" { + buf.WriteString(message[1:]) + } else { + buf.WriteString(`"message":`) + buf.WriteString(strconv.Quote(message)) + buf.WriteString(`}`) + } + } else { + // Text header + buf.WriteByte(' ') + buf.WriteString(message) + } + buf.WriteByte('\n') + l.output.Write(buf.Bytes()) + } + } +} diff --git a/vendor/github.com/labstack/gommon/log/white.go b/vendor/github.com/labstack/gommon/log/white.go new file mode 100644 index 0000000..746cc56 --- /dev/null +++ b/vendor/github.com/labstack/gommon/log/white.go @@ -0,0 +1,12 @@ +// +build appengine + +package log + +import ( + "io" + "os" +) + +func output() io.Writer { + return os.Stdout +} diff --git a/vendor/github.com/maruel/panicparse/LICENSE b/vendor/github.com/maruel/panicparse/LICENSE new file mode 100644 index 0000000..b76840c --- /dev/null +++ b/vendor/github.com/maruel/panicparse/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2015 Marc-Antoine Ruel + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/maruel/panicparse/stack/bucket.go b/vendor/github.com/maruel/panicparse/stack/bucket.go new file mode 100644 index 0000000..259dab2 --- /dev/null +++ b/vendor/github.com/maruel/panicparse/stack/bucket.go @@ -0,0 +1,109 @@ +// Copyright 2015 Marc-Antoine Ruel. All rights reserved. +// Use of this source code is governed under the Apache License, Version 2.0 +// that can be found in the LICENSE file. + +package stack + +import ( + "sort" +) + +// Similarity is the level at which two call lines arguments must match to be +// considered similar enough to coalesce them. +type Similarity int + +const ( + // ExactFlags requires same bits (e.g. Locked). + ExactFlags Similarity = iota + // ExactLines requests the exact same arguments on the call line. + ExactLines + // AnyPointer considers different pointers a similar call line. + AnyPointer + // AnyValue accepts any value as similar call line. + AnyValue +) + +// Bucketize returns the number of similar goroutines. 
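+//
+// It groups goroutines whose signatures are considered similar at the given
+// Similarity level. Illustrative usage sketch (not part of the upstream
+// documentation), assuming `goroutines` came from this package's parser:
+//
+//	buckets := SortBuckets(Bucketize(goroutines, AnyValue))
+//	for _, b := range buckets {
+//		fmt.Printf("%d goroutine(s) share this stack\n", len(b.Routines))
+//	}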
+func Bucketize(goroutines []Goroutine, similar Similarity) map[*Signature][]Goroutine { + out := map[*Signature][]Goroutine{} + // O(n²). Fix eventually. + for _, routine := range goroutines { + found := false + for key := range out { + // When a match is found, this effectively drops the other goroutine ID. + if key.Similar(&routine.Signature, similar) { + found = true + if !key.Equal(&routine.Signature) { + // Almost but not quite equal. There's different pointers passed + // around but the same values. Zap out the different values. + newKey := key.Merge(&routine.Signature) + out[newKey] = append(out[key], routine) + delete(out, key) + } else { + out[key] = append(out[key], routine) + } + break + } + } + if !found { + key := &Signature{} + *key = routine.Signature + out[key] = []Goroutine{routine} + } + } + return out +} + +// Bucket is a stack trace signature and the list of goroutines that fits this +// signature. +type Bucket struct { + Signature + Routines []Goroutine +} + +// First returns true if it contains the first goroutine, e.g. the ones that +// likely generated the panic() call, if any. +func (b *Bucket) First() bool { + for _, r := range b.Routines { + if r.First { + return true + } + } + return false +} + +// Less does reverse sort. +func (b *Bucket) Less(r *Bucket) bool { + if b.First() { + return true + } + if r.First() { + return false + } + return b.Signature.Less(&r.Signature) +} + +// Buckets is a list of Bucket sorted by repeation count. +type Buckets []Bucket + +func (b Buckets) Len() int { + return len(b) +} + +func (b Buckets) Less(i, j int) bool { + return b[i].Less(&b[j]) +} + +func (b Buckets) Swap(i, j int) { + b[j], b[i] = b[i], b[j] +} + +// SortBuckets creates a list of Bucket from each goroutine stack trace count. +func SortBuckets(buckets map[*Signature][]Goroutine) Buckets { + out := make(Buckets, 0, len(buckets)) + for signature, count := range buckets { + out = append(out, Bucket{*signature, count}) + } + sort.Sort(out) + return out +} diff --git a/vendor/github.com/maruel/panicparse/stack/source.go b/vendor/github.com/maruel/panicparse/stack/source.go new file mode 100644 index 0000000..f5fb8fb --- /dev/null +++ b/vendor/github.com/maruel/panicparse/stack/source.go @@ -0,0 +1,302 @@ +// Copyright 2015 Marc-Antoine Ruel. All rights reserved. +// Use of this source code is governed under the Apache License, Version 2.0 +// that can be found in the LICENSE file. + +// This file contains the code to process sources, to be able to deduct the +// original types. + +package stack + +import ( + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/token" + "io/ioutil" + "log" + "math" + "strings" +) + +// cache is a cache of sources on the file system. +type cache struct { + files map[string][]byte + parsed map[string]*parsedFile +} + +// Augment processes source files to improve calls to be more descriptive. +// +// It modifies goroutines in place. +func Augment(goroutines []Goroutine) { + c := &cache{} + for i := range goroutines { + c.augmentGoroutine(&goroutines[i]) + } +} + +// augmentGoroutine processes source files to improve call to be more +// descriptive. +// +// It modifies the routine. +func (c *cache) augmentGoroutine(goroutine *Goroutine) { + if c.files == nil { + c.files = map[string][]byte{} + } + if c.parsed == nil { + c.parsed = map[string]*parsedFile{} + } + // For each call site, look at the next call and populate it. Then we can + // walk back and reformat things. 
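+	//
+	// First pass: make sure the source file behind every call site has been
+	// read and parsed into an AST (failures are ignored, leaving those calls
+	// unaugmented).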
+ for i := range goroutine.Stack.Calls { + c.load(goroutine.Stack.Calls[i].LocalSourcePath()) + } + + // Once all loaded, we can look at the next call when available. + for i := 0; i < len(goroutine.Stack.Calls)-1; i++ { + // Get the AST from the previous call and process the call line with it. + if f := c.getFuncAST(&goroutine.Stack.Calls[i]); f != nil { + processCall(&goroutine.Stack.Calls[i], f) + } + } +} + +// Private stuff. + +// load loads a source file and parses the AST tree. Failures are ignored. +func (c *cache) load(fileName string) { + if _, ok := c.parsed[fileName]; ok { + return + } + c.parsed[fileName] = nil + if !strings.HasSuffix(fileName, ".go") { + // Ignore C and assembly. + c.files[fileName] = nil + return + } + log.Printf("load(%s)", fileName) + if _, ok := c.files[fileName]; !ok { + var err error + if c.files[fileName], err = ioutil.ReadFile(fileName); err != nil { + log.Printf("Failed to read %s: %s", fileName, err) + c.files[fileName] = nil + return + } + } + fset := token.NewFileSet() + src := c.files[fileName] + parsed, err := parser.ParseFile(fset, fileName, src, 0) + if err != nil { + log.Printf("Failed to parse %s: %s", fileName, err) + return + } + // Convert the line number into raw file offset. + offsets := []int{0, 0} + start := 0 + for l := 1; start < len(src); l++ { + start += bytes.IndexByte(src[start:], '\n') + 1 + offsets = append(offsets, start) + } + c.parsed[fileName] = &parsedFile{offsets, parsed} +} + +func (c *cache) getFuncAST(call *Call) *ast.FuncDecl { + if p := c.parsed[call.LocalSourcePath()]; p != nil { + return p.getFuncAST(call.Func.Name(), call.Line) + } + return nil +} + +type parsedFile struct { + lineToByteOffset []int + parsed *ast.File +} + +// getFuncAST gets the callee site function AST representation for the code +// inside the function f at line l. +func (p *parsedFile) getFuncAST(f string, l int) (d *ast.FuncDecl) { + if len(p.lineToByteOffset) <= l { + // The line number in the stack trace line does not exist in the file. That + // can only mean that the sources on disk do not match the sources used to + // build the binary. + // TODO(maruel): This should be surfaced, so that source parsing is + // completely ignored. + return + } + + // Walk the AST to find the lineToByteOffset that fits the line number. + var lastFunc *ast.FuncDecl + var found ast.Node + // Inspect() goes depth first. This means for example that a function like: + // func a() { + // b := func() {} + // c() + // } + // + // Were we are looking at the c() call can return confused values. It is + // important to look at the actual ast.Node hierarchy. + ast.Inspect(p.parsed, func(n ast.Node) bool { + if d != nil { + return false + } + if n == nil { + return true + } + if found != nil { + // We are walking up. + } + if int(n.Pos()) >= p.lineToByteOffset[l] { + // We are expecting a ast.CallExpr node. It can be harder to figure out + // when there are multiple calls on a single line, as the stack trace + // doesn't have file byte offset information, only line based. 
+ // gofmt will always format to one function call per line but there can + // be edge cases, like: + // a = A{Foo(), Bar()} + d = lastFunc + //p.processNode(call, n) + return false + } else if f, ok := n.(*ast.FuncDecl); ok { + lastFunc = f + } + return true + }) + return +} + +func name(n ast.Node) string { + switch t := n.(type) { + case *ast.InterfaceType: + return "interface{}" + case *ast.Ident: + return t.Name + case *ast.SelectorExpr: + return t.Sel.Name + case *ast.StarExpr: + return "*" + name(t.X) + default: + return "" + } +} + +// fieldToType returns the type name and whether if it's an ellipsis. +func fieldToType(f *ast.Field) (string, bool) { + switch arg := f.Type.(type) { + case *ast.ArrayType: + return "[]" + name(arg.Elt), false + case *ast.Ellipsis: + return name(arg.Elt), true + case *ast.FuncType: + // Do not print the function signature to not overload the trace. + return "func", false + case *ast.Ident: + return arg.Name, false + case *ast.InterfaceType: + return "interface{}", false + case *ast.SelectorExpr: + return arg.Sel.Name, false + case *ast.StarExpr: + return "*" + name(arg.X), false + case *ast.MapType: + return fmt.Sprintf("map[%s]%s", name(arg.Key), name(arg.Value)), false + case *ast.ChanType: + return fmt.Sprintf("chan %s", name(arg.Value)), false + default: + // TODO(maruel): Implement anything missing. + return "", false + } +} + +// extractArgumentsType returns the name of the type of each input argument. +func extractArgumentsType(f *ast.FuncDecl) ([]string, bool) { + var fields []*ast.Field + if f.Recv != nil { + if len(f.Recv.List) != 1 { + panic("Expect only one receiver; please fix panicparse's code") + } + // If it is an object receiver (vs a pointer receiver), its address is not + // printed in the stack trace so it needs to be ignored. + if _, ok := f.Recv.List[0].Type.(*ast.StarExpr); ok { + fields = append(fields, f.Recv.List[0]) + } + } + var types []string + extra := false + for _, arg := range append(fields, f.Type.Params.List...) { + // Assert that extra is only set on the last item of fields? + var t string + t, extra = fieldToType(arg) + mult := len(arg.Names) + if mult == 0 { + mult = 1 + } + for i := 0; i < mult; i++ { + types = append(types, t) + } + } + return types, extra +} + +// processCall walks the function and populate call accordingly. +func processCall(call *Call, f *ast.FuncDecl) { + values := make([]uint64, len(call.Args.Values)) + for i := range call.Args.Values { + values[i] = call.Args.Values[i].Value + } + index := 0 + pop := func() uint64 { + if len(values) != 0 { + x := values[0] + values = values[1:] + index++ + return x + } + return 0 + } + popName := func() string { + n := call.Args.Values[index].Name + v := pop() + if len(n) == 0 { + return fmt.Sprintf("0x%x", v) + } + return n + } + + types, extra := extractArgumentsType(f) + for i := 0; len(values) != 0; i++ { + var t string + if i >= len(types) { + if !extra { + // These are unexpected value! Print them as hex. 
+ call.Args.Processed = append(call.Args.Processed, popName()) + continue + } + t = types[len(types)-1] + } else { + t = types[i] + } + switch t { + case "float32": + call.Args.Processed = append(call.Args.Processed, fmt.Sprintf("%g", math.Float32frombits(uint32(pop())))) + case "float64": + call.Args.Processed = append(call.Args.Processed, fmt.Sprintf("%g", math.Float64frombits(pop()))) + case "int", "int8", "int16", "int32", "int64", "uint", "uint8", "uint16", "uint32", "uint64": + call.Args.Processed = append(call.Args.Processed, fmt.Sprintf("%d", pop())) + case "string": + call.Args.Processed = append(call.Args.Processed, fmt.Sprintf("%s(%s, len=%d)", t, popName(), pop())) + default: + if strings.HasPrefix(t, "*") { + call.Args.Processed = append(call.Args.Processed, fmt.Sprintf("%s(%s)", t, popName())) + } else if strings.HasPrefix(t, "[]") { + call.Args.Processed = append(call.Args.Processed, fmt.Sprintf("%s(%s len=%d cap=%d)", t, popName(), pop(), pop())) + } else { + // Assumes it's an interface. For now, discard the object value, which + // is probably not a good idea. + call.Args.Processed = append(call.Args.Processed, fmt.Sprintf("%s(%s)", t, popName())) + pop() + } + } + if len(values) == 0 && call.Args.Elided { + return + } + } +} diff --git a/vendor/github.com/maruel/panicparse/stack/stack.go b/vendor/github.com/maruel/panicparse/stack/stack.go new file mode 100644 index 0000000..0aad495 --- /dev/null +++ b/vendor/github.com/maruel/panicparse/stack/stack.go @@ -0,0 +1,906 @@ +// Copyright 2015 Marc-Antoine Ruel. All rights reserved. +// Use of this source code is governed under the Apache License, Version 2.0 +// that can be found in the LICENSE file. + +// Package stack analyzes stack dump of Go processes and simplifies it. +// +// It is mostly useful on servers will large number of identical goroutines, +// making the crash dump harder to read than strictly necessary. +package stack + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "log" + "math" + "net/url" + "os" + "os/user" + "path/filepath" + "regexp" + "runtime" + "sort" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +const lockedToThread = "locked to thread" + +var ( + // TODO(maruel): Handle corrupted stack cases: + // - missed stack barrier + // - found next stack barrier at 0x123; expected + // - runtime: unexpected return pc for FUNC_NAME called from 0x123 + + reRoutineHeader = regexp.MustCompile("^goroutine (\\d+) \\[([^\\]]+)\\]\\:\r?\n$") + reMinutes = regexp.MustCompile("^(\\d+) minutes$") + reUnavail = regexp.MustCompile("^(?:\t| +)goroutine running on other thread; stack unavailable") + // See gentraceback() in src/runtime/traceback.go for more information. + // - Sometimes the source file comes up as "". It is the + // compiler than generated these, not the runtime. + // - The tab may be replaced with spaces when a user copy-paste it, handle + // this transparently. + // - "runtime.gopanic" is explicitly replaced with "panic" by gentraceback(). + // - The +0x123 byte offset is printed when frame.pc > _func.entry. _func is + // generated by the linker. + // - The +0x123 byte offset is not included with generated code, e.g. unnamed + // functions "func·006()" which is generally go func() { ... }() + // statements. Since the _func is generated at runtime, it's probably why + // _func.entry is not set. + // - C calls may have fp=0x123 sp=0x123 appended. I think it normally happens + // when a signal is not correctly handled. It is printed with m.throwing>0. + // These are discarded. 
+ // - For cgo, the source file may be "??". + reFile = regexp.MustCompile("^(?:\t| +)(\\?\\?|\\|.+\\.(?:c|go|s))\\:(\\d+)(?:| \\+0x[0-9a-f]+)(?:| fp=0x[0-9a-f]+ sp=0x[0-9a-f]+)\r?\n$") + // Sadly, it doesn't note the goroutine number so we could cascade them per + // parenthood. + reCreated = regexp.MustCompile("^created by (.+)\r?\n$") + reFunc = regexp.MustCompile("^(.+)\\((.*)\\)\r?\n$") + reElided = regexp.MustCompile("^\\.\\.\\.additional frames elided\\.\\.\\.\r?\n$") + + // TODO(maruel): This is a global state, affected by ParseDump(). This will + // be refactored in v2. + + // goroot is the GOROOT as detected in the traceback, not the on the host. + // + // It can be empty if no root was determined, for example the traceback + // contains only non-stdlib source references. + goroot string + // gopaths is the GOPATH as detected in the traceback, with the value being + // the corresponding path mapped to the host. + // + // It can be empty if only stdlib code is in the traceback or if no local + // sources were matched up. In the general case there is only one. + gopaths map[string]string + // Corresponding local values on the host. + localgoroot = runtime.GOROOT() + localgopaths = getGOPATHs() +) + +// Function is a function call. +// +// Go stack traces print a mangled function call, this wrapper unmangle the +// string before printing and adds other filtering methods. +type Function struct { + Raw string +} + +// String is the fully qualified function name. +// +// Sadly Go is a bit confused when the package name doesn't match the directory +// containing the source file and will use the directory name instead of the +// real package name. +func (f Function) String() string { + s, _ := url.QueryUnescape(f.Raw) + return s +} + +// Name is the naked function name. +func (f Function) Name() string { + parts := strings.SplitN(filepath.Base(f.Raw), ".", 2) + if len(parts) == 1 { + return parts[0] + } + return parts[1] +} + +// PkgName is the package name for this function reference. +func (f Function) PkgName() string { + parts := strings.SplitN(filepath.Base(f.Raw), ".", 2) + if len(parts) == 1 { + return "" + } + s, _ := url.QueryUnescape(parts[0]) + return s +} + +// PkgDotName returns "." format. +func (f Function) PkgDotName() string { + parts := strings.SplitN(filepath.Base(f.Raw), ".", 2) + s, _ := url.QueryUnescape(parts[0]) + if len(parts) == 1 { + return parts[0] + } + if s != "" || parts[1] != "" { + return s + "." + parts[1] + } + return "" +} + +// IsExported returns true if the function is exported. +func (f Function) IsExported() bool { + name := f.Name() + parts := strings.Split(name, ".") + r, _ := utf8.DecodeRuneInString(parts[len(parts)-1]) + if unicode.ToUpper(r) == r { + return true + } + return f.PkgName() == "main" && name == "main" +} + +// Arg is an argument on a Call. +type Arg struct { + Value uint64 // Value is the raw value as found in the stack trace + Name string // Name is a pseudo name given to the argument +} + +// IsPtr returns true if we guess it's a pointer. It's only a guess, it can be +// easily be confused by a bitmask. +func (a *Arg) IsPtr() bool { + // Assumes all pointers are above 16Mb and positive. + return a.Value > 16*1024*1024 && a.Value < math.MaxInt64 +} + +func (a Arg) String() string { + if a.Name != "" { + return a.Name + } + if a.Value == 0 { + return "0" + } + return fmt.Sprintf("0x%x", a.Value) +} + +// Args is a series of function call arguments. 
+type Args struct { + Values []Arg // Values is the arguments as shown on the stack trace. They are mangled via simplification. + Processed []string // Processed is the arguments generated from processing the source files. It can have a length lower than Values. + Elided bool // If set, it means there was a trailing ", ..." +} + +func (a Args) String() string { + var v []string + if len(a.Processed) != 0 { + v = make([]string, 0, len(a.Processed)) + for _, item := range a.Processed { + v = append(v, item) + } + } else { + v = make([]string, 0, len(a.Values)) + for _, item := range a.Values { + v = append(v, item.String()) + } + } + if a.Elided { + v = append(v, "...") + } + return strings.Join(v, ", ") +} + +// Equal returns true only if both arguments are exactly equal. +func (a *Args) Equal(r *Args) bool { + if a.Elided != r.Elided || len(a.Values) != len(r.Values) { + return false + } + for i, l := range a.Values { + if l != r.Values[i] { + return false + } + } + return true +} + +// Similar returns true if the two Args are equal or almost but not quite +// equal. +func (a *Args) Similar(r *Args, similar Similarity) bool { + if a.Elided != r.Elided || len(a.Values) != len(r.Values) { + return false + } + if similar == AnyValue { + return true + } + for i, l := range a.Values { + switch similar { + case ExactFlags, ExactLines: + if l != r.Values[i] { + return false + } + default: + if l.IsPtr() != r.Values[i].IsPtr() || (!l.IsPtr() && l != r.Values[i]) { + return false + } + } + } + return true +} + +// Merge merges two similar Args, zapping out differences. +func (a *Args) Merge(r *Args) Args { + out := Args{ + Values: make([]Arg, len(a.Values)), + Elided: a.Elided, + } + for i, l := range a.Values { + if l != r.Values[i] { + out.Values[i].Name = "*" + out.Values[i].Value = l.Value + } else { + out.Values[i] = l + } + } + return out +} + +// Call is an item in the stack trace. +type Call struct { + SourcePath string // Full path name of the source file as seen in the trace + Line int // Line number + Func Function // Fully qualified function name (encoded). + Args Args // Call arguments +} + +// Equal returns true only if both calls are exactly equal. +func (c *Call) Equal(r *Call) bool { + return c.SourcePath == r.SourcePath && c.Line == r.Line && c.Func == r.Func && c.Args.Equal(&r.Args) +} + +// Similar returns true if the two Call are equal or almost but not quite +// equal. +func (c *Call) Similar(r *Call, similar Similarity) bool { + return c.SourcePath == r.SourcePath && c.Line == r.Line && c.Func == r.Func && c.Args.Similar(&r.Args, similar) +} + +// Merge merges two similar Call, zapping out differences. +func (c *Call) Merge(r *Call) Call { + return Call{ + SourcePath: c.SourcePath, + Line: c.Line, + Func: c.Func, + Args: c.Args.Merge(&r.Args), + } +} + +// SourceName returns the base file name of the source file. +func (c *Call) SourceName() string { + return filepath.Base(c.SourcePath) +} + +// SourceLine returns "source.go:line", including only the base file name. +func (c *Call) SourceLine() string { + return fmt.Sprintf("%s:%d", c.SourceName(), c.Line) +} + +// LocalSourcePath is the full path name of the source file as seen in the host. +func (c *Call) LocalSourcePath() string { + // TODO(maruel): Call needs members goroot and gopaths. 
+ if strings.HasPrefix(c.SourcePath, goroot) { + return filepath.Join(localgoroot, c.SourcePath[len(goroot):]) + } + for prefix, dest := range gopaths { + if strings.HasPrefix(c.SourcePath, prefix) { + return filepath.Join(dest, c.SourcePath[len(prefix):]) + } + } + return c.SourcePath +} + +// FullSourceLine returns "/path/to/source.go:line". +// +// This file path is mutated to look like the local path. +func (c *Call) FullSourceLine() string { + return fmt.Sprintf("%s:%d", c.SourcePath, c.Line) +} + +// PkgSource is one directory plus the file name of the source file. +func (c *Call) PkgSource() string { + return filepath.Join(filepath.Base(filepath.Dir(c.SourcePath)), c.SourceName()) +} + +const testMainSource = "_test" + string(os.PathSeparator) + "_testmain.go" + +// IsStdlib returns true if it is a Go standard library function. This includes +// the 'go test' generated main executable. +func (c *Call) IsStdlib() bool { + // Consider _test/_testmain.go as stdlib since it's injected by "go test". + return (goroot != "" && strings.HasPrefix(c.SourcePath, goroot)) || c.PkgSource() == testMainSource +} + +// IsPkgMain returns true if it is in the main package. +func (c *Call) IsPkgMain() bool { + return c.Func.PkgName() == "main" +} + +// Stack is a call stack. +type Stack struct { + Calls []Call // Call stack. First is original function, last is leaf function. + Elided bool // Happens when there's >100 items in Stack, currently hardcoded in package runtime. +} + +// Equal returns true on if both call stacks are exactly equal. +func (s *Stack) Equal(r *Stack) bool { + if len(s.Calls) != len(r.Calls) || s.Elided != r.Elided { + return false + } + for i := range s.Calls { + if !s.Calls[i].Equal(&r.Calls[i]) { + return false + } + } + return true +} + +// Similar returns true if the two Stack are equal or almost but not quite +// equal. +func (s *Stack) Similar(r *Stack, similar Similarity) bool { + if len(s.Calls) != len(r.Calls) || s.Elided != r.Elided { + return false + } + for i := range s.Calls { + if !s.Calls[i].Similar(&r.Calls[i], similar) { + return false + } + } + return true +} + +// Merge merges two similar Stack, zapping out differences. +func (s *Stack) Merge(r *Stack) *Stack { + // Assumes similar stacks have the same length. + out := &Stack{ + Calls: make([]Call, len(s.Calls)), + Elided: s.Elided, + } + for i := range s.Calls { + out.Calls[i] = s.Calls[i].Merge(&r.Calls[i]) + } + return out +} + +// Less compares two Stack, where the ones that are less are more +// important, so they come up front. A Stack with more private functions is +// 'less' so it is at the top. Inversely, a Stack with only public +// functions is 'more' so it is at the bottom. +func (s *Stack) Less(r *Stack) bool { + lStdlib := 0 + lPrivate := 0 + for _, c := range s.Calls { + if c.IsStdlib() { + lStdlib++ + } else { + lPrivate++ + } + } + rStdlib := 0 + rPrivate := 0 + for _, s := range r.Calls { + if s.IsStdlib() { + rStdlib++ + } else { + rPrivate++ + } + } + if lPrivate > rPrivate { + return true + } + if lPrivate < rPrivate { + return false + } + if lStdlib > rStdlib { + return false + } + if lStdlib < rStdlib { + return true + } + + // Stack lengths are the same. 
+ for x := range s.Calls { + if s.Calls[x].Func.Raw < r.Calls[x].Func.Raw { + return true + } + if s.Calls[x].Func.Raw > r.Calls[x].Func.Raw { + return true + } + if s.Calls[x].PkgSource() < r.Calls[x].PkgSource() { + return true + } + if s.Calls[x].PkgSource() > r.Calls[x].PkgSource() { + return true + } + if s.Calls[x].Line < r.Calls[x].Line { + return true + } + if s.Calls[x].Line > r.Calls[x].Line { + return true + } + } + return false +} + +// Signature represents the signature of one or multiple goroutines. +// +// It is effectively the stack trace plus the goroutine internal bits, like +// it's state, if it is thread locked, which call site created this goroutine, +// etc. +type Signature struct { + // Use git grep 'gopark(|unlock)\(' to find them all plus everything listed + // in runtime/traceback.go. Valid values includes: + // - chan send, chan receive, select + // - finalizer wait, mark wait (idle), + // - Concurrent GC wait, GC sweep wait, force gc (idle) + // - IO wait, panicwait + // - semacquire, semarelease + // - sleep, timer goroutine (idle) + // - trace reader (blocked) + // Stuck cases: + // - chan send (nil chan), chan receive (nil chan), select (no cases) + // Runnable states: + // - idle, runnable, running, syscall, waiting, dead, enqueue, copystack, + // Scan states: + // - scan, scanrunnable, scanrunning, scansyscall, scanwaiting, scandead, + // scanenqueue + State string + CreatedBy Call // Which other goroutine which created this one. + SleepMin int // Wait time in minutes, if applicable. + SleepMax int // Wait time in minutes, if applicable. + Stack Stack + Locked bool // Locked to an OS thread. +} + +// Equal returns true only if both signatures are exactly equal. +func (s *Signature) Equal(r *Signature) bool { + if s.State != r.State || !s.CreatedBy.Equal(&r.CreatedBy) || s.Locked != r.Locked || s.SleepMin != r.SleepMin || s.SleepMax != r.SleepMax { + return false + } + return s.Stack.Equal(&r.Stack) +} + +// Similar returns true if the two Signature are equal or almost but not quite +// equal. +func (s *Signature) Similar(r *Signature, similar Similarity) bool { + if s.State != r.State || !s.CreatedBy.Similar(&r.CreatedBy, similar) { + return false + } + if similar == ExactFlags && s.Locked != r.Locked { + return false + } + return s.Stack.Similar(&r.Stack, similar) +} + +// Merge merges two similar Signature, zapping out differences. +func (s *Signature) Merge(r *Signature) *Signature { + min := s.SleepMin + if r.SleepMin < min { + min = r.SleepMin + } + max := s.SleepMax + if r.SleepMax > max { + max = r.SleepMax + } + return &Signature{ + State: s.State, // Drop right side. + CreatedBy: s.CreatedBy, // Drop right side. + SleepMin: min, + SleepMax: max, + Stack: *s.Stack.Merge(&r.Stack), + Locked: s.Locked || r.Locked, // TODO(maruel): This is weirdo. + } +} + +// Less compares two Signature, where the ones that are less are more +// important, so they come up front. A Signature with more private functions is +// 'less' so it is at the top. Inversely, a Signature with only public +// functions is 'more' so it is at the bottom. +func (s *Signature) Less(r *Signature) bool { + if s.Stack.Less(&r.Stack) { + return true + } + if r.Stack.Less(&s.Stack) { + return false + } + if s.Locked && !r.Locked { + return true + } + if r.Locked && !s.Locked { + return false + } + if s.State < r.State { + return true + } + if s.State > r.State { + return false + } + return false +} + +// Goroutine represents the state of one goroutine, including the stack trace. 
+type Goroutine struct { + Signature // It's stack trace, internal bits, state, which call site created it, etc. + ID int // Goroutine ID. + First bool // First is the goroutine first printed, normally the one that crashed. +} + +// scanLines is similar to bufio.ScanLines except that it: +// - doesn't drop '\n' +// - doesn't strip '\r' +// - returns when the data is bufio.MaxScanTokenSize bytes +func scanLines(data []byte, atEOF bool) (advance int, token []byte, err error) { + if atEOF && len(data) == 0 { + return 0, nil, nil + } + if i := bytes.IndexByte(data, '\n'); i >= 0 { + return i + 1, data[0 : i+1], nil + } + if atEOF { + return len(data), data, nil + } + if len(data) >= bufio.MaxScanTokenSize { + // Returns the line even if it is not at EOF nor has a '\n', otherwise the + // scanner will return bufio.ErrTooLong which is definitely not what we + // want. + return len(data), data, nil + } + return 0, nil, nil +} + +// ParseDump processes the output from runtime.Stack(). +// +// It supports piping from another command and assumes there is junk before the +// actual stack trace. The junk is streamed to out. +func ParseDump(r io.Reader, out io.Writer) ([]Goroutine, error) { + goroutines := make([]Goroutine, 0, 16) + var goroutine *Goroutine + scanner := bufio.NewScanner(r) + scanner.Split(scanLines) + // TODO(maruel): Use a formal state machine. Patterns follows: + // - reRoutineHeader + // Either: + // - reUnavail + // - reFunc + reFile in a loop + // - reElided + // Optionally ends with: + // - reCreated + reFile + // Between each goroutine stack dump: an empty line + created := false + // firstLine is the first line after the reRoutineHeader header line. + firstLine := false + for scanner.Scan() { + line := scanner.Text() + if line == "\n" || line == "\r\n" { + if goroutine != nil { + goroutine = nil + continue + } + } else if line[len(line)-1] == '\n' { + if goroutine == nil { + if match := reRoutineHeader.FindStringSubmatch(line); match != nil { + if id, err := strconv.Atoi(match[1]); err == nil { + // See runtime/traceback.go. + // ", \d+ minutes, locked to thread" + items := strings.Split(match[2], ", ") + sleep := 0 + locked := false + for i := 1; i < len(items); i++ { + if items[i] == lockedToThread { + locked = true + continue + } + // Look for duration, if any. + if match2 := reMinutes.FindStringSubmatch(items[i]); match2 != nil { + sleep, _ = strconv.Atoi(match2[1]) + } + } + goroutines = append(goroutines, Goroutine{ + Signature: Signature{ + State: items[0], + SleepMin: sleep, + SleepMax: sleep, + Locked: locked, + }, + ID: id, + First: len(goroutines) == 0, + }) + goroutine = &goroutines[len(goroutines)-1] + firstLine = true + continue + } + } + } else { + if firstLine { + firstLine = false + if match := reUnavail.FindStringSubmatch(line); match != nil { + // Generate a fake stack entry. + goroutine.Stack.Calls = []Call{{SourcePath: ""}} + continue + } + } + + if match := reFile.FindStringSubmatch(line); match != nil { + // Triggers after a reFunc or a reCreated. 
+ num, err := strconv.Atoi(match[2]) + if err != nil { + return goroutines, fmt.Errorf("failed to parse int on line: \"%s\"", line) + } + if created { + created = false + goroutine.CreatedBy.SourcePath = match[1] + goroutine.CreatedBy.Line = num + } else { + i := len(goroutine.Stack.Calls) - 1 + if i < 0 { + return goroutines, errors.New("unexpected order") + } + goroutine.Stack.Calls[i].SourcePath = match[1] + goroutine.Stack.Calls[i].Line = num + } + continue + } + + if match := reCreated.FindStringSubmatch(line); match != nil { + created = true + goroutine.CreatedBy.Func.Raw = match[1] + continue + } + + if match := reFunc.FindStringSubmatch(line); match != nil { + args := Args{} + for _, a := range strings.Split(match[2], ", ") { + if a == "..." { + args.Elided = true + continue + } + if a == "" { + // Remaining values were dropped. + break + } + v, err := strconv.ParseUint(a, 0, 64) + if err != nil { + return goroutines, fmt.Errorf("failed to parse int on line: \"%s\"", line) + } + args.Values = append(args.Values, Arg{Value: v}) + } + goroutine.Stack.Calls = append(goroutine.Stack.Calls, Call{Func: Function{match[1]}, Args: args}) + continue + } + + if match := reElided.FindStringSubmatch(line); match != nil { + goroutine.Stack.Elided = true + continue + } + } + } + _, _ = io.WriteString(out, line) + goroutine = nil + } + nameArguments(goroutines) + // Mutate global state. + // TODO(maruel): Make this part of the context instead of a global. + if goroot == "" { + findRoots(goroutines) + } + return goroutines, scanner.Err() +} + +// NoRebase disables GOROOT and GOPATH guessing in ParseDump(). +// +// BUG: This function will be removed in v2, as ParseDump() will accept a flag +// explicitly. +func NoRebase() { + goroot = runtime.GOROOT() + gopaths = map[string]string{} + for _, p := range getGOPATHs() { + gopaths[p] = p + } +} + +// Private stuff. + +func nameArguments(goroutines []Goroutine) { + // Set a name for any pointer occurring more than once. + type object struct { + args []*Arg + inPrimary bool + id int + } + objects := map[uint64]object{} + // Enumerate all the arguments. + for i := range goroutines { + for j := range goroutines[i].Stack.Calls { + for k := range goroutines[i].Stack.Calls[j].Args.Values { + arg := goroutines[i].Stack.Calls[j].Args.Values[k] + if arg.IsPtr() { + objects[arg.Value] = object{ + args: append(objects[arg.Value].args, &goroutines[i].Stack.Calls[j].Args.Values[k]), + inPrimary: objects[arg.Value].inPrimary || i == 0, + } + } + } + } + // CreatedBy.Args is never set. + } + order := make(uint64Slice, 0, len(objects)/2) + for k, obj := range objects { + if len(obj.args) > 1 && obj.inPrimary { + order = append(order, k) + } + } + sort.Sort(order) + nextID := 1 + for _, k := range order { + for _, arg := range objects[k].args { + arg.Name = fmt.Sprintf("#%d", nextID) + } + nextID++ + } + + // Now do the rest. This is done so the output is deterministic. + order = make(uint64Slice, 0, len(objects)) + for k := range objects { + order = append(order, k) + } + sort.Sort(order) + for _, k := range order { + // Process the remaining pointers, they were not referenced by primary + // thread so will have higher IDs. + if objects[k].inPrimary { + continue + } + for _, arg := range objects[k].args { + arg.Name = fmt.Sprintf("#%d", nextID) + } + nextID++ + } +} + +// hasPathPrefix returns true if any of s is the prefix of p. 
+func hasPathPrefix(p string, s map[string]string) bool { + for prefix := range s { + if strings.HasPrefix(p, prefix+"/") { + return true + } + } + return false +} + +// getFiles returns all the source files deduped and ordered. +func getFiles(goroutines []Goroutine) []string { + files := map[string]struct{}{} + for _, g := range goroutines { + for _, c := range g.Stack.Calls { + files[c.SourcePath] = struct{}{} + } + } + out := make([]string, 0, len(files)) + for f := range files { + out = append(out, f) + } + sort.Strings(out) + return out +} + +// splitPath splits a path into its components. +// +// The first item has its initial path separator kept. +func splitPath(p string) []string { + if p == "" { + return nil + } + var out []string + s := "" + for _, c := range p { + if c != '/' || (len(out) == 0 && strings.Count(s, "/") == len(s)) { + s += string(c) + } else if s != "" { + out = append(out, s) + s = "" + } + } + if s != "" { + out = append(out, s) + } + return out +} + +// isFile returns true if the path is a valid file. +func isFile(p string) bool { + // TODO(maruel): Is it faster to open the file or to stat it? Worth a perf + // test on Windows. + i, err := os.Stat(p) + return err == nil && !i.IsDir() +} + +// isRootIn returns a root if the file split in parts is rooted in root. +func rootedIn(root string, parts []string) string { + //log.Printf("rootIn(%s, %v)", root, parts) + for i := 1; i < len(parts); i++ { + suffix := filepath.Join(parts[i:]...) + if isFile(filepath.Join(root, suffix)) { + return filepath.Join(parts[:i]...) + } + } + return "" +} + +// findRoots sets global variables goroot and gopath. +// +// TODO(maruel): In v2, it will be a property of the new struct that will +// contain the goroutines. +func findRoots(goroutines []Goroutine) { + gopaths = map[string]string{} + for _, f := range getFiles(goroutines) { + // TODO(maruel): Could a stack dump have mixed cases? I think it's + // possible, need to confirm and handle. + //log.Printf(" Analyzing %s", f) + if goroot != "" && strings.HasPrefix(f, goroot+"/") { + continue + } + if gopaths != nil && hasPathPrefix(f, gopaths) { + continue + } + parts := splitPath(f) + if goroot == "" { + if r := rootedIn(localgoroot, parts); r != "" { + goroot = r + log.Printf("Found GOROOT=%s", goroot) + continue + } + } + found := false + for _, l := range localgopaths { + if r := rootedIn(l, parts); r != "" { + log.Printf("Found GOPATH=%s", r) + gopaths[r] = l + found = true + break + } + } + if !found { + // If the source is not found, just too bad. + //log.Printf("Failed to find locally: %s / %s", f, goroot) + } + } +} + +func getGOPATHs() []string { + var out []string + for _, v := range filepath.SplitList(os.Getenv("GOPATH")) { + // Disallow non-absolute paths? 
+ if v != "" { + out = append(out, v) + } + } + if len(out) == 0 { + homeDir := "" + u, err := user.Current() + if err != nil { + homeDir = os.Getenv("HOME") + if homeDir == "" { + panic(fmt.Sprintf("Could not get current user or $HOME: %s\n", err.Error())) + } + } else { + homeDir = u.HomeDir + } + out = []string{homeDir + "go"} + } + return out +} + +type uint64Slice []uint64 + +func (a uint64Slice) Len() int { return len(a) } +func (a uint64Slice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a uint64Slice) Less(i, j int) bool { return a[i] < a[j] } diff --git a/vendor/github.com/maruel/panicparse/stack/ui.go b/vendor/github.com/maruel/panicparse/stack/ui.go new file mode 100644 index 0000000..b125fc9 --- /dev/null +++ b/vendor/github.com/maruel/panicparse/stack/ui.go @@ -0,0 +1,139 @@ +// Copyright 2016 Marc-Antoine Ruel. All rights reserved. +// Use of this source code is governed under the Apache License, Version 2.0 +// that can be found in the LICENSE file. + +package stack + +import ( + "fmt" + "strings" +) + +// Palette defines the color used. +// +// An empty object Palette{} can be used to disable coloring. +type Palette struct { + EOLReset string + + // Routine header. + RoutineFirst string // The first routine printed. + Routine string // Following routines. + CreatedBy string + + // Call line. + Package string + SourceFile string + FunctionStdLib string + FunctionStdLibExported string + FunctionMain string + FunctionOther string + FunctionOtherExported string + Arguments string +} + +// CalcLengths returns the maximum length of the source lines and package names. +func CalcLengths(buckets Buckets, fullPath bool) (int, int) { + srcLen := 0 + pkgLen := 0 + for _, bucket := range buckets { + for _, line := range bucket.Signature.Stack.Calls { + l := 0 + if fullPath { + l = len(line.FullSourceLine()) + } else { + l = len(line.SourceLine()) + } + if l > srcLen { + srcLen = l + } + l = len(line.Func.PkgName()) + if l > pkgLen { + pkgLen = l + } + } + } + return srcLen, pkgLen +} + +// functionColor returns the color to be used for the function name based on +// the type of package the function is in. +func (p *Palette) functionColor(line *Call) string { + if line.IsStdlib() { + if line.Func.IsExported() { + return p.FunctionStdLibExported + } + return p.FunctionStdLib + } else if line.IsPkgMain() { + return p.FunctionMain + } else if line.Func.IsExported() { + return p.FunctionOtherExported + } + return p.FunctionOther +} + +// routineColor returns the color for the header of the goroutines bucket. +func (p *Palette) routineColor(bucket *Bucket, multipleBuckets bool) string { + if bucket.First() && multipleBuckets { + return p.RoutineFirst + } + return p.Routine +} + +// BucketHeader prints the header of a goroutine signature. 
+func (p *Palette) BucketHeader(bucket *Bucket, fullPath, multipleBuckets bool) string { + extra := "" + if bucket.SleepMax != 0 { + if bucket.SleepMin != bucket.SleepMax { + extra += fmt.Sprintf(" [%d~%d minutes]", bucket.SleepMin, bucket.SleepMax) + } else { + extra += fmt.Sprintf(" [%d minutes]", bucket.SleepMax) + } + } + if bucket.Locked { + extra += " [locked]" + } + created := bucket.CreatedBy.Func.PkgDotName() + if created != "" { + created += " @ " + if fullPath { + created += bucket.CreatedBy.FullSourceLine() + } else { + created += bucket.CreatedBy.SourceLine() + } + extra += p.CreatedBy + " [Created by " + created + "]" + } + return fmt.Sprintf( + "%s%d: %s%s%s\n", + p.routineColor(bucket, multipleBuckets), len(bucket.Routines), + bucket.State, extra, + p.EOLReset) +} + +// callLine prints one stack line. +func (p *Palette) callLine(line *Call, srcLen, pkgLen int, fullPath bool) string { + src := "" + if fullPath { + src = line.FullSourceLine() + } else { + src = line.SourceLine() + } + return fmt.Sprintf( + " %s%-*s %s%-*s %s%s%s(%s)%s", + p.Package, pkgLen, line.Func.PkgName(), + p.SourceFile, srcLen, src, + p.functionColor(line), line.Func.Name(), + p.Arguments, line.Args, + p.EOLReset) +} + +// StackLines prints one complete stack trace, without the header. +func (p *Palette) StackLines(signature *Signature, srcLen, pkgLen int, fullPath bool) string { + out := make([]string, len(signature.Stack.Calls)) + for i := range signature.Stack.Calls { + out[i] = p.callLine(&signature.Stack.Calls[i], srcLen, pkgLen, fullPath) + } + if signature.Stack.Elided { + out = append(out, " (...)") + } + return strings.Join(out, "\n") + "\n" +} diff --git a/vendor/github.com/mattn/go-colorable/.travis.yml b/vendor/github.com/mattn/go-colorable/.travis.yml new file mode 100644 index 0000000..98db8f0 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/.travis.yml @@ -0,0 +1,9 @@ +language: go +go: + - tip + +before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover +script: + - $HOME/gopath/bin/goveralls -repotoken xnXqRGwgW3SXIguzxf90ZSK1GPYZPaGrw diff --git a/vendor/github.com/mattn/go-colorable/LICENSE b/vendor/github.com/mattn/go-colorable/LICENSE new file mode 100644 index 0000000..91b5cef --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
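Taken together, the vendored panicparse `stack` package above and the go-colorable package below form a small pipeline: `ParseDump` turns a `runtime.Stack()`-style dump into goroutines, `Augment` enriches the calls from local sources, `Bucketize`/`SortBuckets` group similar goroutines, `Palette` renders the buckets, and go-colorable makes the ANSI-colored output usable on Windows consoles. The snippet below is a minimal sketch of how these pieces can be wired together; it is illustrative glue code based only on the APIs shown in this diff, not part of either upstream package.

```go
// Illustrative glue code only; not part of panicparse or go-colorable.
package main

import (
	"io"
	"os"

	"github.com/maruel/panicparse/stack"
	"github.com/mattn/go-colorable"
)

func main() {
	// go-colorable translates ANSI escape sequences into console API calls on
	// Windows and is a pass-through writer elsewhere.
	out := colorable.NewColorableStdout()

	// Parse a crash dump piped on stdin; anything that is not part of the
	// stack trace is streamed to out untouched.
	goroutines, err := stack.ParseDump(os.Stdin, out)
	if err != nil {
		panic(err)
	}

	// Optionally resolve argument types by parsing the local source files.
	stack.Augment(goroutines)

	// Group goroutines with similar signatures and sort the buckets so the
	// most relevant ones come first.
	buckets := stack.SortBuckets(stack.Bucketize(goroutines, stack.AnyValue))
	srcLen, pkgLen := stack.CalcLengths(buckets, false)

	// An empty Palette disables coloring; fill its fields with ANSI escape
	// sequences to colorize the output.
	p := &stack.Palette{}
	for _, bucket := range buckets {
		_, _ = io.WriteString(out, p.BucketHeader(&bucket, false, len(buckets) > 1))
		_, _ = io.WriteString(out, p.StackLines(&bucket.Signature, srcLen, pkgLen, false))
	}
}
```

An empty `stack.Palette{}` keeps the output uncolored; populating its fields with ANSI escape sequences is what gives the go-colorable Windows writer below its translation work to do.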
diff --git a/vendor/github.com/mattn/go-colorable/README.md b/vendor/github.com/mattn/go-colorable/README.md
new file mode 100644
index 0000000..56729a9
--- /dev/null
+++ b/vendor/github.com/mattn/go-colorable/README.md
@@ -0,0 +1,48 @@
+# go-colorable
+
+[![Godoc Reference](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable)
+[![Build Status](https://travis-ci.org/mattn/go-colorable.svg?branch=master)](https://travis-ci.org/mattn/go-colorable)
+[![Coverage Status](https://coveralls.io/repos/github/mattn/go-colorable/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-colorable?branch=master)
+[![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable)
+
+Colorable writer for Windows.
+
+For example, most logger packages don't show colors on Windows. (I know we can do it with ansicon, but I don't want to.)
+This package makes it possible to handle ANSI color escape sequences on Windows.
+
+## Too Bad!
+
+![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png)
+
+
+## So Good!
+
+![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png)
+
+## Usage
+
+```go
+logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true})
+logrus.SetOutput(colorable.NewColorableStdout())
+
+logrus.Info("succeeded")
+logrus.Warn("not correct")
+logrus.Error("something error")
+logrus.Fatal("panic")
+```
+
+You can compile the above code on non-Windows OSes as well.
+
+## Installation
+
+```
+$ go get github.com/mattn/go-colorable
+```
+
+# License
+
+MIT
+
+# Author
+
+Yasuhiro Matsumoto (a.k.a mattn)
diff --git a/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/vendor/github.com/mattn/go-colorable/colorable_appengine.go
new file mode 100644
index 0000000..1f28d77
--- /dev/null
+++ b/vendor/github.com/mattn/go-colorable/colorable_appengine.go
@@ -0,0 +1,29 @@
+// +build appengine
+
+package colorable
+
+import (
+	"io"
+	"os"
+
+	_ "github.com/mattn/go-isatty"
+)
+
+// NewColorable return new instance of Writer which handle escape sequence.
+func NewColorable(file *os.File) io.Writer {
+	if file == nil {
+		panic("nil passed instead of *os.File to NewColorable()")
+	}
+
+	return file
+}
+
+// NewColorableStdout return new instance of Writer which handle escape sequence for stdout.
+func NewColorableStdout() io.Writer {
+	return os.Stdout
+}
+
+// NewColorableStderr return new instance of Writer which handle escape sequence for stderr.
+func NewColorableStderr() io.Writer {
+	return os.Stderr
+}
diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go
new file mode 100644
index 0000000..887f203
--- /dev/null
+++ b/vendor/github.com/mattn/go-colorable/colorable_others.go
@@ -0,0 +1,30 @@
+// +build !windows
+// +build !appengine
+
+package colorable
+
+import (
+	"io"
+	"os"
+
+	_ "github.com/mattn/go-isatty"
+)
+
+// NewColorable return new instance of Writer which handle escape sequence.
+func NewColorable(file *os.File) io.Writer {
+	if file == nil {
+		panic("nil passed instead of *os.File to NewColorable()")
+	}
+
+	return file
+}
+
+// NewColorableStdout return new instance of Writer which handle escape sequence for stdout.
+func NewColorableStdout() io.Writer {
+	return os.Stdout
+}
+
+// NewColorableStderr return new instance of Writer which handle escape sequence for stderr.
+func NewColorableStderr() io.Writer { + return os.Stderr +} diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go new file mode 100644 index 0000000..e17a547 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go @@ -0,0 +1,884 @@ +// +build windows +// +build !appengine + +package colorable + +import ( + "bytes" + "io" + "math" + "os" + "strconv" + "strings" + "syscall" + "unsafe" + + "github.com/mattn/go-isatty" +) + +const ( + foregroundBlue = 0x1 + foregroundGreen = 0x2 + foregroundRed = 0x4 + foregroundIntensity = 0x8 + foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) + backgroundBlue = 0x10 + backgroundGreen = 0x20 + backgroundRed = 0x40 + backgroundIntensity = 0x80 + backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) +) + +type wchar uint16 +type short int16 +type dword uint32 +type word uint16 + +type coord struct { + x short + y short +} + +type smallRect struct { + left short + top short + right short + bottom short +} + +type consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord +} + +type consoleCursorInfo struct { + size dword + visible int32 +} + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") + procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") + procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") + procGetConsoleCursorInfo = kernel32.NewProc("GetConsoleCursorInfo") + procSetConsoleCursorInfo = kernel32.NewProc("SetConsoleCursorInfo") + procSetConsoleTitle = kernel32.NewProc("SetConsoleTitleW") +) + +// Writer provide colorable Writer to the console +type Writer struct { + out io.Writer + handle syscall.Handle + oldattr word + oldpos coord +} + +// NewColorable return new instance of Writer which handle escape sequence from File. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + if isatty.IsTerminal(file.Fd()) { + var csbi consoleScreenBufferInfo + handle := syscall.Handle(file.Fd()) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}} + } + return file +} + +// NewColorableStdout return new instance of Writer which handle escape sequence for stdout. +func NewColorableStdout() io.Writer { + return NewColorable(os.Stdout) +} + +// NewColorableStderr return new instance of Writer which handle escape sequence for stderr. 
+func NewColorableStderr() io.Writer { + return NewColorable(os.Stderr) +} + +var color256 = map[int]int{ + 0: 0x000000, + 1: 0x800000, + 2: 0x008000, + 3: 0x808000, + 4: 0x000080, + 5: 0x800080, + 6: 0x008080, + 7: 0xc0c0c0, + 8: 0x808080, + 9: 0xff0000, + 10: 0x00ff00, + 11: 0xffff00, + 12: 0x0000ff, + 13: 0xff00ff, + 14: 0x00ffff, + 15: 0xffffff, + 16: 0x000000, + 17: 0x00005f, + 18: 0x000087, + 19: 0x0000af, + 20: 0x0000d7, + 21: 0x0000ff, + 22: 0x005f00, + 23: 0x005f5f, + 24: 0x005f87, + 25: 0x005faf, + 26: 0x005fd7, + 27: 0x005fff, + 28: 0x008700, + 29: 0x00875f, + 30: 0x008787, + 31: 0x0087af, + 32: 0x0087d7, + 33: 0x0087ff, + 34: 0x00af00, + 35: 0x00af5f, + 36: 0x00af87, + 37: 0x00afaf, + 38: 0x00afd7, + 39: 0x00afff, + 40: 0x00d700, + 41: 0x00d75f, + 42: 0x00d787, + 43: 0x00d7af, + 44: 0x00d7d7, + 45: 0x00d7ff, + 46: 0x00ff00, + 47: 0x00ff5f, + 48: 0x00ff87, + 49: 0x00ffaf, + 50: 0x00ffd7, + 51: 0x00ffff, + 52: 0x5f0000, + 53: 0x5f005f, + 54: 0x5f0087, + 55: 0x5f00af, + 56: 0x5f00d7, + 57: 0x5f00ff, + 58: 0x5f5f00, + 59: 0x5f5f5f, + 60: 0x5f5f87, + 61: 0x5f5faf, + 62: 0x5f5fd7, + 63: 0x5f5fff, + 64: 0x5f8700, + 65: 0x5f875f, + 66: 0x5f8787, + 67: 0x5f87af, + 68: 0x5f87d7, + 69: 0x5f87ff, + 70: 0x5faf00, + 71: 0x5faf5f, + 72: 0x5faf87, + 73: 0x5fafaf, + 74: 0x5fafd7, + 75: 0x5fafff, + 76: 0x5fd700, + 77: 0x5fd75f, + 78: 0x5fd787, + 79: 0x5fd7af, + 80: 0x5fd7d7, + 81: 0x5fd7ff, + 82: 0x5fff00, + 83: 0x5fff5f, + 84: 0x5fff87, + 85: 0x5fffaf, + 86: 0x5fffd7, + 87: 0x5fffff, + 88: 0x870000, + 89: 0x87005f, + 90: 0x870087, + 91: 0x8700af, + 92: 0x8700d7, + 93: 0x8700ff, + 94: 0x875f00, + 95: 0x875f5f, + 96: 0x875f87, + 97: 0x875faf, + 98: 0x875fd7, + 99: 0x875fff, + 100: 0x878700, + 101: 0x87875f, + 102: 0x878787, + 103: 0x8787af, + 104: 0x8787d7, + 105: 0x8787ff, + 106: 0x87af00, + 107: 0x87af5f, + 108: 0x87af87, + 109: 0x87afaf, + 110: 0x87afd7, + 111: 0x87afff, + 112: 0x87d700, + 113: 0x87d75f, + 114: 0x87d787, + 115: 0x87d7af, + 116: 0x87d7d7, + 117: 0x87d7ff, + 118: 0x87ff00, + 119: 0x87ff5f, + 120: 0x87ff87, + 121: 0x87ffaf, + 122: 0x87ffd7, + 123: 0x87ffff, + 124: 0xaf0000, + 125: 0xaf005f, + 126: 0xaf0087, + 127: 0xaf00af, + 128: 0xaf00d7, + 129: 0xaf00ff, + 130: 0xaf5f00, + 131: 0xaf5f5f, + 132: 0xaf5f87, + 133: 0xaf5faf, + 134: 0xaf5fd7, + 135: 0xaf5fff, + 136: 0xaf8700, + 137: 0xaf875f, + 138: 0xaf8787, + 139: 0xaf87af, + 140: 0xaf87d7, + 141: 0xaf87ff, + 142: 0xafaf00, + 143: 0xafaf5f, + 144: 0xafaf87, + 145: 0xafafaf, + 146: 0xafafd7, + 147: 0xafafff, + 148: 0xafd700, + 149: 0xafd75f, + 150: 0xafd787, + 151: 0xafd7af, + 152: 0xafd7d7, + 153: 0xafd7ff, + 154: 0xafff00, + 155: 0xafff5f, + 156: 0xafff87, + 157: 0xafffaf, + 158: 0xafffd7, + 159: 0xafffff, + 160: 0xd70000, + 161: 0xd7005f, + 162: 0xd70087, + 163: 0xd700af, + 164: 0xd700d7, + 165: 0xd700ff, + 166: 0xd75f00, + 167: 0xd75f5f, + 168: 0xd75f87, + 169: 0xd75faf, + 170: 0xd75fd7, + 171: 0xd75fff, + 172: 0xd78700, + 173: 0xd7875f, + 174: 0xd78787, + 175: 0xd787af, + 176: 0xd787d7, + 177: 0xd787ff, + 178: 0xd7af00, + 179: 0xd7af5f, + 180: 0xd7af87, + 181: 0xd7afaf, + 182: 0xd7afd7, + 183: 0xd7afff, + 184: 0xd7d700, + 185: 0xd7d75f, + 186: 0xd7d787, + 187: 0xd7d7af, + 188: 0xd7d7d7, + 189: 0xd7d7ff, + 190: 0xd7ff00, + 191: 0xd7ff5f, + 192: 0xd7ff87, + 193: 0xd7ffaf, + 194: 0xd7ffd7, + 195: 0xd7ffff, + 196: 0xff0000, + 197: 0xff005f, + 198: 0xff0087, + 199: 0xff00af, + 200: 0xff00d7, + 201: 0xff00ff, + 202: 0xff5f00, + 203: 0xff5f5f, + 204: 0xff5f87, + 205: 0xff5faf, + 206: 0xff5fd7, + 207: 0xff5fff, + 208: 0xff8700, + 
209: 0xff875f, + 210: 0xff8787, + 211: 0xff87af, + 212: 0xff87d7, + 213: 0xff87ff, + 214: 0xffaf00, + 215: 0xffaf5f, + 216: 0xffaf87, + 217: 0xffafaf, + 218: 0xffafd7, + 219: 0xffafff, + 220: 0xffd700, + 221: 0xffd75f, + 222: 0xffd787, + 223: 0xffd7af, + 224: 0xffd7d7, + 225: 0xffd7ff, + 226: 0xffff00, + 227: 0xffff5f, + 228: 0xffff87, + 229: 0xffffaf, + 230: 0xffffd7, + 231: 0xffffff, + 232: 0x080808, + 233: 0x121212, + 234: 0x1c1c1c, + 235: 0x262626, + 236: 0x303030, + 237: 0x3a3a3a, + 238: 0x444444, + 239: 0x4e4e4e, + 240: 0x585858, + 241: 0x626262, + 242: 0x6c6c6c, + 243: 0x767676, + 244: 0x808080, + 245: 0x8a8a8a, + 246: 0x949494, + 247: 0x9e9e9e, + 248: 0xa8a8a8, + 249: 0xb2b2b2, + 250: 0xbcbcbc, + 251: 0xc6c6c6, + 252: 0xd0d0d0, + 253: 0xdadada, + 254: 0xe4e4e4, + 255: 0xeeeeee, +} + +// `\033]0;TITLESTR\007` +func doTitleSequence(er *bytes.Reader) error { + var c byte + var err error + + c, err = er.ReadByte() + if err != nil { + return err + } + if c != '0' && c != '2' { + return nil + } + c, err = er.ReadByte() + if err != nil { + return err + } + if c != ';' { + return nil + } + title := make([]byte, 0, 80) + for { + c, err = er.ReadByte() + if err != nil { + return err + } + if c == 0x07 || c == '\n' { + break + } + title = append(title, c) + } + if len(title) > 0 { + title8, err := syscall.UTF16PtrFromString(string(title)) + if err == nil { + procSetConsoleTitle.Call(uintptr(unsafe.Pointer(title8))) + } + } + return nil +} + +// Write write data on console +func (w *Writer) Write(data []byte) (n int, err error) { + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + + er := bytes.NewReader(data) + var bw [1]byte +loop: + for { + c1, err := er.ReadByte() + if err != nil { + break loop + } + if c1 != 0x1b { + bw[0] = c1 + w.out.Write(bw[:]) + continue + } + c2, err := er.ReadByte() + if err != nil { + break loop + } + + if c2 == ']' { + if err := doTitleSequence(er); err != nil { + break loop + } + continue + } + if c2 != 0x5b { + continue + } + + var buf bytes.Buffer + var m byte + for { + c, err := er.ReadByte() + if err != nil { + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + m = c + break + } + buf.Write([]byte(string(c))) + } + + switch m { + case 'A': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'B': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'C': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'D': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), 
*(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'E': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'F': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'G': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = short(n - 1) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'H', 'f': + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + if buf.Len() > 0 { + token := strings.Split(buf.String(), ";") + switch len(token) { + case 1: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + csbi.cursorPosition.y = short(n1 - 1) + case 2: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + n2, err := strconv.Atoi(token[1]) + if err != nil { + continue + } + csbi.cursorPosition.x = short(n2 - 1) + csbi.cursorPosition.y = short(n1 - 1) + } + } else { + csbi.cursorPosition.y = 0 + } + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'J': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + var count, written dword + var cursor coord + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x) + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.window.top-csbi.cursorPosition.y)*csbi.size.x) + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x) + } + procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'K': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + var cursor coord + var count, written dword + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x + 1, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x - 1) + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x) + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y} + count = dword(csbi.size.x) + } + 
procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'm': + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + attr := csbi.attributes + cs := buf.String() + if cs == "" { + procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr)) + continue + } + token := strings.Split(cs, ";") + for i := 0; i < len(token); i++ { + ns := token[i] + if n, err = strconv.Atoi(ns); err == nil { + switch { + case n == 0 || n == 100: + attr = w.oldattr + case 1 <= n && n <= 5: + attr |= foregroundIntensity + case n == 7: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case n == 22 || n == 25: + attr |= foregroundIntensity + case n == 27: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case 30 <= n && n <= 37: + attr &= backgroundMask + if (n-30)&1 != 0 { + attr |= foregroundRed + } + if (n-30)&2 != 0 { + attr |= foregroundGreen + } + if (n-30)&4 != 0 { + attr |= foregroundBlue + } + case n == 38: // set foreground color. + if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256foreAttr == nil { + n256setup() + } + attr &= backgroundMask + attr |= n256foreAttr[n256] + i += 2 + } + } else { + attr = attr & (w.oldattr & backgroundMask) + } + case n == 39: // reset foreground color. + attr &= backgroundMask + attr |= w.oldattr & foregroundMask + case 40 <= n && n <= 47: + attr &= foregroundMask + if (n-40)&1 != 0 { + attr |= backgroundRed + } + if (n-40)&2 != 0 { + attr |= backgroundGreen + } + if (n-40)&4 != 0 { + attr |= backgroundBlue + } + case n == 48: // set background color. + if i < len(token)-2 && token[i+1] == "5" { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256backAttr == nil { + n256setup() + } + attr &= foregroundMask + attr |= n256backAttr[n256] + i += 2 + } + } else { + attr = attr & (w.oldattr & foregroundMask) + } + case n == 49: // reset foreground color. 
+ attr &= foregroundMask + attr |= w.oldattr & backgroundMask + case 90 <= n && n <= 97: + attr = (attr & backgroundMask) + attr |= foregroundIntensity + if (n-90)&1 != 0 { + attr |= foregroundRed + } + if (n-90)&2 != 0 { + attr |= foregroundGreen + } + if (n-90)&4 != 0 { + attr |= foregroundBlue + } + case 100 <= n && n <= 107: + attr = (attr & foregroundMask) + attr |= backgroundIntensity + if (n-100)&1 != 0 { + attr |= backgroundRed + } + if (n-100)&2 != 0 { + attr |= backgroundGreen + } + if (n-100)&4 != 0 { + attr |= backgroundBlue + } + } + procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr)) + } + } + case 'h': + var ci consoleCursorInfo + cs := buf.String() + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + } + case 'l': + var ci consoleCursorInfo + cs := buf.String() + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + } + case 's': + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + case 'u': + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + } + } + + return len(data), nil +} + +type consoleColor struct { + rgb int + red bool + green bool + blue bool + intensity bool +} + +func (c consoleColor) foregroundAttr() (attr word) { + if c.red { + attr |= foregroundRed + } + if c.green { + attr |= foregroundGreen + } + if c.blue { + attr |= foregroundBlue + } + if c.intensity { + attr |= foregroundIntensity + } + return +} + +func (c consoleColor) backgroundAttr() (attr word) { + if c.red { + attr |= backgroundRed + } + if c.green { + attr |= backgroundGreen + } + if c.blue { + attr |= backgroundBlue + } + if c.intensity { + attr |= backgroundIntensity + } + return +} + +var color16 = []consoleColor{ + {0x000000, false, false, false, false}, + {0x000080, false, false, true, false}, + {0x008000, false, true, false, false}, + {0x008080, false, true, true, false}, + {0x800000, true, false, false, false}, + {0x800080, true, false, true, false}, + {0x808000, true, true, false, false}, + {0xc0c0c0, true, true, true, false}, + {0x808080, false, false, false, true}, + {0x0000ff, false, false, true, true}, + {0x00ff00, false, true, false, true}, + {0x00ffff, false, true, true, true}, + {0xff0000, true, false, false, true}, + {0xff00ff, true, false, true, true}, + {0xffff00, true, true, false, true}, + {0xffffff, true, true, true, true}, +} + +type hsv struct { + h, s, v float32 +} + +func (a hsv) dist(b hsv) float32 { + dh := a.h - b.h + switch { + case dh > 0.5: + dh = 1 - dh + case dh < -0.5: + dh = -1 - dh + } + ds := a.s - b.s + dv := a.v - b.v + return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) +} + +func toHSV(rgb int) hsv { + r, g, b := float32((rgb&0xFF0000)>>16)/256.0, + float32((rgb&0x00FF00)>>8)/256.0, + float32(rgb&0x0000FF)/256.0 + min, max := minmax3f(r, 
g, b) + h := max - min + if h > 0 { + if max == r { + h = (g - b) / h + if h < 0 { + h += 6 + } + } else if max == g { + h = 2 + (b-r)/h + } else { + h = 4 + (r-g)/h + } + } + h /= 6.0 + s := max - min + if max != 0 { + s /= max + } + v := max + return hsv{h: h, s: s, v: v} +} + +type hsvTable []hsv + +func toHSVTable(rgbTable []consoleColor) hsvTable { + t := make(hsvTable, len(rgbTable)) + for i, c := range rgbTable { + t[i] = toHSV(c.rgb) + } + return t +} + +func (t hsvTable) find(rgb int) consoleColor { + hsv := toHSV(rgb) + n := 7 + l := float32(5.0) + for i, p := range t { + d := hsv.dist(p) + if d < l { + l, n = d, i + } + } + return color16[n] +} + +func minmax3f(a, b, c float32) (min, max float32) { + if a < b { + if b < c { + return a, c + } else if a < c { + return a, b + } else { + return c, b + } + } else { + if a < c { + return b, c + } else if b < c { + return b, a + } else { + return c, a + } + } +} + +var n256foreAttr []word +var n256backAttr []word + +func n256setup() { + n256foreAttr = make([]word, 256) + n256backAttr = make([]word, 256) + t := toHSVTable(color16) + for i, rgb := range color256 { + c := t.find(rgb) + n256foreAttr[i] = c.foregroundAttr() + n256backAttr[i] = c.backgroundAttr() + } +} diff --git a/vendor/github.com/mattn/go-colorable/noncolorable.go b/vendor/github.com/mattn/go-colorable/noncolorable.go new file mode 100644 index 0000000..9721e16 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/noncolorable.go @@ -0,0 +1,55 @@ +package colorable + +import ( + "bytes" + "io" +) + +// NonColorable hold writer but remove escape sequence. +type NonColorable struct { + out io.Writer +} + +// NewNonColorable return new instance of Writer which remove escape sequence from Writer. +func NewNonColorable(w io.Writer) io.Writer { + return &NonColorable{out: w} +} + +// Write write data on console +func (w *NonColorable) Write(data []byte) (n int, err error) { + er := bytes.NewReader(data) + var bw [1]byte +loop: + for { + c1, err := er.ReadByte() + if err != nil { + break loop + } + if c1 != 0x1b { + bw[0] = c1 + w.out.Write(bw[:]) + continue + } + c2, err := er.ReadByte() + if err != nil { + break loop + } + if c2 != 0x5b { + continue + } + + var buf bytes.Buffer + for { + c, err := er.ReadByte() + if err != nil { + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + break + } + buf.Write([]byte(string(c))) + } + } + + return len(data), nil +} diff --git a/vendor/github.com/mattn/go-isatty/.travis.yml b/vendor/github.com/mattn/go-isatty/.travis.yml new file mode 100644 index 0000000..5597e02 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/.travis.yml @@ -0,0 +1,13 @@ +language: go +go: + - tip + +os: + - linux + - osx + +before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover +script: + - $HOME/gopath/bin/goveralls -repotoken 3gHdORO5k5ziZcWMBxnd9LrMZaJs8m9x5 diff --git a/vendor/github.com/mattn/go-isatty/LICENSE b/vendor/github.com/mattn/go-isatty/LICENSE new file mode 100644 index 0000000..65dc692 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) Yasuhiro MATSUMOTO + +MIT License (Expat) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to 
whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md new file mode 100644 index 0000000..1e69004 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/README.md @@ -0,0 +1,50 @@ +# go-isatty + +[![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty) +[![Build Status](https://travis-ci.org/mattn/go-isatty.svg?branch=master)](https://travis-ci.org/mattn/go-isatty) +[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master) +[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty) + +isatty for golang + +## Usage + +```go +package main + +import ( + "fmt" + "github.com/mattn/go-isatty" + "os" +) + +func main() { + if isatty.IsTerminal(os.Stdout.Fd()) { + fmt.Println("Is Terminal") + } else if isatty.IsCygwinTerminal(os.Stdout.Fd()) { + fmt.Println("Is Cygwin/MSYS2 Terminal") + } else { + fmt.Println("Is Not Terminal") + } +} +``` + +## Installation + +``` +$ go get github.com/mattn/go-isatty +``` + +## License + +MIT + +## Author + +Yasuhiro Matsumoto (a.k.a mattn) + +## Thanks + +* k-takata: base idea for IsCygwinTerminal + + https://github.com/k-takata/go-iscygpty diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go new file mode 100644 index 0000000..17d4f90 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/doc.go @@ -0,0 +1,2 @@ +// Package isatty implements an interface to isatty +package isatty diff --git a/vendor/github.com/mattn/go-isatty/isatty_appengine.go b/vendor/github.com/mattn/go-isatty/isatty_appengine.go new file mode 100644 index 0000000..9584a98 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_appengine.go @@ -0,0 +1,15 @@ +// +build appengine + +package isatty + +// IsTerminal returns true if the file descriptor is a terminal, which +// is always false on appengine classic, a sandboxed PaaS. +func IsTerminal(fd uintptr) bool { + return false +} + +// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false in this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go new file mode 100644 index 0000000..42f2514 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -0,0 +1,18 @@ +// +build darwin freebsd openbsd netbsd dragonfly +// +build !appengine + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TIOCGETA + +// IsTerminal returns true if the file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux.go b/vendor/github.com/mattn/go-isatty/isatty_linux.go new file mode 100644 index 0000000..7384cf9 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_linux.go @@ -0,0 +1,18 @@ +// +build linux +// +build !appengine,!ppc64,!ppc64le + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TCGETS + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go b/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go new file mode 100644 index 0000000..44e5d21 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go @@ -0,0 +1,19 @@ +// +build linux +// +build ppc64 ppc64le + +package isatty + +import ( + "unsafe" + + syscall "golang.org/x/sys/unix" +) + +const ioctlReadTermios = syscall.TCGETS + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go new file mode 100644 index 0000000..9d8b4a5 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_others.go @@ -0,0 +1,10 @@ +// +build !windows +// +build !appengine + +package isatty + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go new file mode 100644 index 0000000..1f0c6bf --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go @@ -0,0 +1,16 @@ +// +build solaris +// +build !appengine + +package isatty + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c +func IsTerminal(fd uintptr) bool { + var termio unix.Termio + err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) + return err == nil +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go new file mode 100644 index 0000000..af51cbc --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go @@ -0,0 +1,94 @@ +// +build windows +// +build !appengine + +package isatty + +import ( + "strings" + "syscall" + "unicode/utf16" + "unsafe" +) + +const ( + fileNameInfo uintptr = 2 + fileTypePipe = 3 +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx") + procGetFileType = kernel32.NewProc("GetFileType") +) + +func init() { + // Check if GetFileInformationByHandleEx is available. 
+ if procGetFileInformationByHandleEx.Find() != nil { + procGetFileInformationByHandleEx = nil + } +} + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +// Check pipe name is used for cygwin/msys2 pty. +// Cygwin/MSYS2 PTY has a name like: +// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master +func isCygwinPipeName(name string) bool { + token := strings.Split(name, "-") + if len(token) < 5 { + return false + } + + if token[0] != `\msys` && token[0] != `\cygwin` { + return false + } + + if token[1] == "" { + return false + } + + if !strings.HasPrefix(token[2], "pty") { + return false + } + + if token[3] != `from` && token[3] != `to` { + return false + } + + if token[4] != "master" { + return false + } + + return true +} + +// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 +// terminal. +func IsCygwinTerminal(fd uintptr) bool { + if procGetFileInformationByHandleEx == nil { + return false + } + + // Cygwin/msys's pty is a pipe. + ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0) + if ft != fileTypePipe || e != 0 { + return false + } + + var buf [2 + syscall.MAX_PATH]uint16 + r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), + 4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)), + uintptr(len(buf)*2), 0, 0) + if r == 0 || e != 0 { + return false + } + + l := *(*uint32)(unsafe.Pointer(&buf)) + return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2]))) +} diff --git a/vendor/github.com/mattn/go-runewidth/.travis.yml b/vendor/github.com/mattn/go-runewidth/.travis.yml new file mode 100644 index 0000000..5c9c2a3 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/.travis.yml @@ -0,0 +1,8 @@ +language: go +go: + - tip +before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover +script: + - $HOME/gopath/bin/goveralls -repotoken lAKAWPzcGsD3A8yBX3BGGtRUdJ6CaGERL diff --git a/vendor/github.com/mattn/go-runewidth/LICENSE b/vendor/github.com/mattn/go-runewidth/LICENSE new file mode 100644 index 0000000..91b5cef --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/mattn/go-runewidth/README.mkd b/vendor/github.com/mattn/go-runewidth/README.mkd new file mode 100644 index 0000000..66663a9 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/README.mkd @@ -0,0 +1,27 @@ +go-runewidth +============ + +[![Build Status](https://travis-ci.org/mattn/go-runewidth.png?branch=master)](https://travis-ci.org/mattn/go-runewidth) +[![Coverage Status](https://coveralls.io/repos/mattn/go-runewidth/badge.png?branch=HEAD)](https://coveralls.io/r/mattn/go-runewidth?branch=HEAD) +[![GoDoc](https://godoc.org/github.com/mattn/go-runewidth?status.svg)](http://godoc.org/github.com/mattn/go-runewidth) +[![Go Report Card](https://goreportcard.com/badge/github.com/mattn/go-runewidth)](https://goreportcard.com/report/github.com/mattn/go-runewidth) + +Provides functions to get fixed width of the character or string. + +Usage +----- + +```go +runewidth.StringWidth("つのだ☆HIRO") == 12 +``` + + +Author +------ + +Yasuhiro Matsumoto + +License +------- + +under the MIT License: http://mattn.mit-license.org/2013 diff --git a/vendor/github.com/mattn/go-runewidth/runewidth.go b/vendor/github.com/mattn/go-runewidth/runewidth.go new file mode 100644 index 0000000..82568a1 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/runewidth.go @@ -0,0 +1,1235 @@ +package runewidth + +import "os" + +var ( + // EastAsianWidth will be set true if the current locale is CJK + EastAsianWidth bool + + // DefaultCondition is a condition in current locale + DefaultCondition = &Condition{EastAsianWidth} +) + +func init() { + env := os.Getenv("RUNEWIDTH_EASTASIAN") + if env == "" { + EastAsianWidth = IsEastAsian() + } else { + EastAsianWidth = env == "1" + } +} + +type interval struct { + first rune + last rune +} + +type table []interval + +func inTables(r rune, ts ...table) bool { + for _, t := range ts { + if inTable(r, t) { + return true + } + } + return false +} + +func inTable(r rune, t table) bool { + // func (t table) IncludesRune(r rune) bool { + if r < t[0].first { + return false + } + + bot := 0 + top := len(t) - 1 + for top >= bot { + mid := (bot + top) / 2 + + switch { + case t[mid].last < r: + bot = mid + 1 + case t[mid].first > r: + top = mid - 1 + default: + return true + } + } + + return false +} + +var private = table{ + {0x00E000, 0x00F8FF}, {0x0F0000, 0x0FFFFD}, {0x100000, 0x10FFFD}, +} + +var nonprint = table{ + {0x0000, 0x001F}, {0x007F, 0x009F}, {0x00AD, 0x00AD}, + {0x070F, 0x070F}, {0x180B, 0x180E}, {0x200B, 0x200F}, + {0x2028, 0x2029}, + {0x202A, 0x202E}, {0x206A, 0x206F}, {0xD800, 0xDFFF}, + {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFB}, {0xFFFE, 0xFFFF}, +} + +var combining = table{ + {0x0300, 0x036F}, {0x0483, 0x0489}, {0x0591, 0x05BD}, + {0x05BF, 0x05BF}, {0x05C1, 0x05C2}, {0x05C4, 0x05C5}, + {0x05C7, 0x05C7}, {0x0610, 0x061A}, {0x064B, 0x065F}, + {0x0670, 0x0670}, {0x06D6, 0x06DC}, {0x06DF, 0x06E4}, + {0x06E7, 0x06E8}, {0x06EA, 0x06ED}, {0x0711, 0x0711}, + {0x0730, 0x074A}, {0x07A6, 0x07B0}, {0x07EB, 0x07F3}, + {0x0816, 0x0819}, {0x081B, 0x0823}, {0x0825, 0x0827}, + {0x0829, 0x082D}, {0x0859, 0x085B}, {0x08D4, 0x08E1}, + {0x08E3, 0x0903}, {0x093A, 0x093C}, {0x093E, 0x094F}, + {0x0951, 0x0957}, {0x0962, 0x0963}, {0x0981, 0x0983}, + {0x09BC, 0x09BC}, {0x09BE, 0x09C4}, {0x09C7, 0x09C8}, + {0x09CB, 0x09CD}, {0x09D7, 0x09D7}, {0x09E2, 0x09E3}, + {0x0A01, 0x0A03}, {0x0A3C, 0x0A3C}, {0x0A3E, 0x0A42}, + {0x0A47, 0x0A48}, {0x0A4B, 0x0A4D}, {0x0A51, 0x0A51}, + {0x0A70, 0x0A71}, {0x0A75, 0x0A75}, {0x0A81, 0x0A83}, + {0x0ABC, 0x0ABC}, {0x0ABE, 0x0AC5}, {0x0AC7, 
0x0AC9}, + {0x0ACB, 0x0ACD}, {0x0AE2, 0x0AE3}, {0x0B01, 0x0B03}, + {0x0B3C, 0x0B3C}, {0x0B3E, 0x0B44}, {0x0B47, 0x0B48}, + {0x0B4B, 0x0B4D}, {0x0B56, 0x0B57}, {0x0B62, 0x0B63}, + {0x0B82, 0x0B82}, {0x0BBE, 0x0BC2}, {0x0BC6, 0x0BC8}, + {0x0BCA, 0x0BCD}, {0x0BD7, 0x0BD7}, {0x0C00, 0x0C03}, + {0x0C3E, 0x0C44}, {0x0C46, 0x0C48}, {0x0C4A, 0x0C4D}, + {0x0C55, 0x0C56}, {0x0C62, 0x0C63}, {0x0C81, 0x0C83}, + {0x0CBC, 0x0CBC}, {0x0CBE, 0x0CC4}, {0x0CC6, 0x0CC8}, + {0x0CCA, 0x0CCD}, {0x0CD5, 0x0CD6}, {0x0CE2, 0x0CE3}, + {0x0D01, 0x0D03}, {0x0D3E, 0x0D44}, {0x0D46, 0x0D48}, + {0x0D4A, 0x0D4D}, {0x0D57, 0x0D57}, {0x0D62, 0x0D63}, + {0x0D82, 0x0D83}, {0x0DCA, 0x0DCA}, {0x0DCF, 0x0DD4}, + {0x0DD6, 0x0DD6}, {0x0DD8, 0x0DDF}, {0x0DF2, 0x0DF3}, + {0x0E31, 0x0E31}, {0x0E34, 0x0E3A}, {0x0E47, 0x0E4E}, + {0x0EB1, 0x0EB1}, {0x0EB4, 0x0EB9}, {0x0EBB, 0x0EBC}, + {0x0EC8, 0x0ECD}, {0x0F18, 0x0F19}, {0x0F35, 0x0F35}, + {0x0F37, 0x0F37}, {0x0F39, 0x0F39}, {0x0F3E, 0x0F3F}, + {0x0F71, 0x0F84}, {0x0F86, 0x0F87}, {0x0F8D, 0x0F97}, + {0x0F99, 0x0FBC}, {0x0FC6, 0x0FC6}, {0x102B, 0x103E}, + {0x1056, 0x1059}, {0x105E, 0x1060}, {0x1062, 0x1064}, + {0x1067, 0x106D}, {0x1071, 0x1074}, {0x1082, 0x108D}, + {0x108F, 0x108F}, {0x109A, 0x109D}, {0x135D, 0x135F}, + {0x1712, 0x1714}, {0x1732, 0x1734}, {0x1752, 0x1753}, + {0x1772, 0x1773}, {0x17B4, 0x17D3}, {0x17DD, 0x17DD}, + {0x180B, 0x180D}, {0x1885, 0x1886}, {0x18A9, 0x18A9}, + {0x1920, 0x192B}, {0x1930, 0x193B}, {0x1A17, 0x1A1B}, + {0x1A55, 0x1A5E}, {0x1A60, 0x1A7C}, {0x1A7F, 0x1A7F}, + {0x1AB0, 0x1ABE}, {0x1B00, 0x1B04}, {0x1B34, 0x1B44}, + {0x1B6B, 0x1B73}, {0x1B80, 0x1B82}, {0x1BA1, 0x1BAD}, + {0x1BE6, 0x1BF3}, {0x1C24, 0x1C37}, {0x1CD0, 0x1CD2}, + {0x1CD4, 0x1CE8}, {0x1CED, 0x1CED}, {0x1CF2, 0x1CF4}, + {0x1CF8, 0x1CF9}, {0x1DC0, 0x1DF5}, {0x1DFB, 0x1DFF}, + {0x20D0, 0x20F0}, {0x2CEF, 0x2CF1}, {0x2D7F, 0x2D7F}, + {0x2DE0, 0x2DFF}, {0x302A, 0x302F}, {0x3099, 0x309A}, + {0xA66F, 0xA672}, {0xA674, 0xA67D}, {0xA69E, 0xA69F}, + {0xA6F0, 0xA6F1}, {0xA802, 0xA802}, {0xA806, 0xA806}, + {0xA80B, 0xA80B}, {0xA823, 0xA827}, {0xA880, 0xA881}, + {0xA8B4, 0xA8C5}, {0xA8E0, 0xA8F1}, {0xA926, 0xA92D}, + {0xA947, 0xA953}, {0xA980, 0xA983}, {0xA9B3, 0xA9C0}, + {0xA9E5, 0xA9E5}, {0xAA29, 0xAA36}, {0xAA43, 0xAA43}, + {0xAA4C, 0xAA4D}, {0xAA7B, 0xAA7D}, {0xAAB0, 0xAAB0}, + {0xAAB2, 0xAAB4}, {0xAAB7, 0xAAB8}, {0xAABE, 0xAABF}, + {0xAAC1, 0xAAC1}, {0xAAEB, 0xAAEF}, {0xAAF5, 0xAAF6}, + {0xABE3, 0xABEA}, {0xABEC, 0xABED}, {0xFB1E, 0xFB1E}, + {0xFE00, 0xFE0F}, {0xFE20, 0xFE2F}, {0x101FD, 0x101FD}, + {0x102E0, 0x102E0}, {0x10376, 0x1037A}, {0x10A01, 0x10A03}, + {0x10A05, 0x10A06}, {0x10A0C, 0x10A0F}, {0x10A38, 0x10A3A}, + {0x10A3F, 0x10A3F}, {0x10AE5, 0x10AE6}, {0x11000, 0x11002}, + {0x11038, 0x11046}, {0x1107F, 0x11082}, {0x110B0, 0x110BA}, + {0x11100, 0x11102}, {0x11127, 0x11134}, {0x11173, 0x11173}, + {0x11180, 0x11182}, {0x111B3, 0x111C0}, {0x111CA, 0x111CC}, + {0x1122C, 0x11237}, {0x1123E, 0x1123E}, {0x112DF, 0x112EA}, + {0x11300, 0x11303}, {0x1133C, 0x1133C}, {0x1133E, 0x11344}, + {0x11347, 0x11348}, {0x1134B, 0x1134D}, {0x11357, 0x11357}, + {0x11362, 0x11363}, {0x11366, 0x1136C}, {0x11370, 0x11374}, + {0x11435, 0x11446}, {0x114B0, 0x114C3}, {0x115AF, 0x115B5}, + {0x115B8, 0x115C0}, {0x115DC, 0x115DD}, {0x11630, 0x11640}, + {0x116AB, 0x116B7}, {0x1171D, 0x1172B}, {0x11C2F, 0x11C36}, + {0x11C38, 0x11C3F}, {0x11C92, 0x11CA7}, {0x11CA9, 0x11CB6}, + {0x16AF0, 0x16AF4}, {0x16B30, 0x16B36}, {0x16F51, 0x16F7E}, + {0x16F8F, 0x16F92}, {0x1BC9D, 0x1BC9E}, {0x1D165, 0x1D169}, + {0x1D16D, 0x1D172}, {0x1D17B, 
0x1D182}, {0x1D185, 0x1D18B}, + {0x1D1AA, 0x1D1AD}, {0x1D242, 0x1D244}, {0x1DA00, 0x1DA36}, + {0x1DA3B, 0x1DA6C}, {0x1DA75, 0x1DA75}, {0x1DA84, 0x1DA84}, + {0x1DA9B, 0x1DA9F}, {0x1DAA1, 0x1DAAF}, {0x1E000, 0x1E006}, + {0x1E008, 0x1E018}, {0x1E01B, 0x1E021}, {0x1E023, 0x1E024}, + {0x1E026, 0x1E02A}, {0x1E8D0, 0x1E8D6}, {0x1E944, 0x1E94A}, + {0xE0100, 0xE01EF}, +} + +var doublewidth = table{ + {0x1100, 0x115F}, {0x231A, 0x231B}, {0x2329, 0x232A}, + {0x23E9, 0x23EC}, {0x23F0, 0x23F0}, {0x23F3, 0x23F3}, + {0x25FD, 0x25FE}, {0x2614, 0x2615}, {0x2648, 0x2653}, + {0x267F, 0x267F}, {0x2693, 0x2693}, {0x26A1, 0x26A1}, + {0x26AA, 0x26AB}, {0x26BD, 0x26BE}, {0x26C4, 0x26C5}, + {0x26CE, 0x26CE}, {0x26D4, 0x26D4}, {0x26EA, 0x26EA}, + {0x26F2, 0x26F3}, {0x26F5, 0x26F5}, {0x26FA, 0x26FA}, + {0x26FD, 0x26FD}, {0x2705, 0x2705}, {0x270A, 0x270B}, + {0x2728, 0x2728}, {0x274C, 0x274C}, {0x274E, 0x274E}, + {0x2753, 0x2755}, {0x2757, 0x2757}, {0x2795, 0x2797}, + {0x27B0, 0x27B0}, {0x27BF, 0x27BF}, {0x2B1B, 0x2B1C}, + {0x2B50, 0x2B50}, {0x2B55, 0x2B55}, {0x2E80, 0x2E99}, + {0x2E9B, 0x2EF3}, {0x2F00, 0x2FD5}, {0x2FF0, 0x2FFB}, + {0x3000, 0x303E}, {0x3041, 0x3096}, {0x3099, 0x30FF}, + {0x3105, 0x312D}, {0x3131, 0x318E}, {0x3190, 0x31BA}, + {0x31C0, 0x31E3}, {0x31F0, 0x321E}, {0x3220, 0x3247}, + {0x3250, 0x32FE}, {0x3300, 0x4DBF}, {0x4E00, 0xA48C}, + {0xA490, 0xA4C6}, {0xA960, 0xA97C}, {0xAC00, 0xD7A3}, + {0xF900, 0xFAFF}, {0xFE10, 0xFE19}, {0xFE30, 0xFE52}, + {0xFE54, 0xFE66}, {0xFE68, 0xFE6B}, {0xFF01, 0xFF60}, + {0xFFE0, 0xFFE6}, {0x16FE0, 0x16FE0}, {0x17000, 0x187EC}, + {0x18800, 0x18AF2}, {0x1B000, 0x1B001}, {0x1F004, 0x1F004}, + {0x1F0CF, 0x1F0CF}, {0x1F18E, 0x1F18E}, {0x1F191, 0x1F19A}, + {0x1F200, 0x1F202}, {0x1F210, 0x1F23B}, {0x1F240, 0x1F248}, + {0x1F250, 0x1F251}, {0x1F300, 0x1F320}, {0x1F32D, 0x1F335}, + {0x1F337, 0x1F37C}, {0x1F37E, 0x1F393}, {0x1F3A0, 0x1F3CA}, + {0x1F3CF, 0x1F3D3}, {0x1F3E0, 0x1F3F0}, {0x1F3F4, 0x1F3F4}, + {0x1F3F8, 0x1F43E}, {0x1F440, 0x1F440}, {0x1F442, 0x1F4FC}, + {0x1F4FF, 0x1F53D}, {0x1F54B, 0x1F54E}, {0x1F550, 0x1F567}, + {0x1F57A, 0x1F57A}, {0x1F595, 0x1F596}, {0x1F5A4, 0x1F5A4}, + {0x1F5FB, 0x1F64F}, {0x1F680, 0x1F6C5}, {0x1F6CC, 0x1F6CC}, + {0x1F6D0, 0x1F6D2}, {0x1F6EB, 0x1F6EC}, {0x1F6F4, 0x1F6F6}, + {0x1F910, 0x1F91E}, {0x1F920, 0x1F927}, {0x1F930, 0x1F930}, + {0x1F933, 0x1F93E}, {0x1F940, 0x1F94B}, {0x1F950, 0x1F95E}, + {0x1F980, 0x1F991}, {0x1F9C0, 0x1F9C0}, {0x20000, 0x2FFFD}, + {0x30000, 0x3FFFD}, +} + +var ambiguous = table{ + {0x00A1, 0x00A1}, {0x00A4, 0x00A4}, {0x00A7, 0x00A8}, + {0x00AA, 0x00AA}, {0x00AD, 0x00AE}, {0x00B0, 0x00B4}, + {0x00B6, 0x00BA}, {0x00BC, 0x00BF}, {0x00C6, 0x00C6}, + {0x00D0, 0x00D0}, {0x00D7, 0x00D8}, {0x00DE, 0x00E1}, + {0x00E6, 0x00E6}, {0x00E8, 0x00EA}, {0x00EC, 0x00ED}, + {0x00F0, 0x00F0}, {0x00F2, 0x00F3}, {0x00F7, 0x00FA}, + {0x00FC, 0x00FC}, {0x00FE, 0x00FE}, {0x0101, 0x0101}, + {0x0111, 0x0111}, {0x0113, 0x0113}, {0x011B, 0x011B}, + {0x0126, 0x0127}, {0x012B, 0x012B}, {0x0131, 0x0133}, + {0x0138, 0x0138}, {0x013F, 0x0142}, {0x0144, 0x0144}, + {0x0148, 0x014B}, {0x014D, 0x014D}, {0x0152, 0x0153}, + {0x0166, 0x0167}, {0x016B, 0x016B}, {0x01CE, 0x01CE}, + {0x01D0, 0x01D0}, {0x01D2, 0x01D2}, {0x01D4, 0x01D4}, + {0x01D6, 0x01D6}, {0x01D8, 0x01D8}, {0x01DA, 0x01DA}, + {0x01DC, 0x01DC}, {0x0251, 0x0251}, {0x0261, 0x0261}, + {0x02C4, 0x02C4}, {0x02C7, 0x02C7}, {0x02C9, 0x02CB}, + {0x02CD, 0x02CD}, {0x02D0, 0x02D0}, {0x02D8, 0x02DB}, + {0x02DD, 0x02DD}, {0x02DF, 0x02DF}, {0x0300, 0x036F}, + {0x0391, 0x03A1}, {0x03A3, 0x03A9}, {0x03B1, 
0x03C1}, + {0x03C3, 0x03C9}, {0x0401, 0x0401}, {0x0410, 0x044F}, + {0x0451, 0x0451}, {0x2010, 0x2010}, {0x2013, 0x2016}, + {0x2018, 0x2019}, {0x201C, 0x201D}, {0x2020, 0x2022}, + {0x2024, 0x2027}, {0x2030, 0x2030}, {0x2032, 0x2033}, + {0x2035, 0x2035}, {0x203B, 0x203B}, {0x203E, 0x203E}, + {0x2074, 0x2074}, {0x207F, 0x207F}, {0x2081, 0x2084}, + {0x20AC, 0x20AC}, {0x2103, 0x2103}, {0x2105, 0x2105}, + {0x2109, 0x2109}, {0x2113, 0x2113}, {0x2116, 0x2116}, + {0x2121, 0x2122}, {0x2126, 0x2126}, {0x212B, 0x212B}, + {0x2153, 0x2154}, {0x215B, 0x215E}, {0x2160, 0x216B}, + {0x2170, 0x2179}, {0x2189, 0x2189}, {0x2190, 0x2199}, + {0x21B8, 0x21B9}, {0x21D2, 0x21D2}, {0x21D4, 0x21D4}, + {0x21E7, 0x21E7}, {0x2200, 0x2200}, {0x2202, 0x2203}, + {0x2207, 0x2208}, {0x220B, 0x220B}, {0x220F, 0x220F}, + {0x2211, 0x2211}, {0x2215, 0x2215}, {0x221A, 0x221A}, + {0x221D, 0x2220}, {0x2223, 0x2223}, {0x2225, 0x2225}, + {0x2227, 0x222C}, {0x222E, 0x222E}, {0x2234, 0x2237}, + {0x223C, 0x223D}, {0x2248, 0x2248}, {0x224C, 0x224C}, + {0x2252, 0x2252}, {0x2260, 0x2261}, {0x2264, 0x2267}, + {0x226A, 0x226B}, {0x226E, 0x226F}, {0x2282, 0x2283}, + {0x2286, 0x2287}, {0x2295, 0x2295}, {0x2299, 0x2299}, + {0x22A5, 0x22A5}, {0x22BF, 0x22BF}, {0x2312, 0x2312}, + {0x2460, 0x24E9}, {0x24EB, 0x254B}, {0x2550, 0x2573}, + {0x2580, 0x258F}, {0x2592, 0x2595}, {0x25A0, 0x25A1}, + {0x25A3, 0x25A9}, {0x25B2, 0x25B3}, {0x25B6, 0x25B7}, + {0x25BC, 0x25BD}, {0x25C0, 0x25C1}, {0x25C6, 0x25C8}, + {0x25CB, 0x25CB}, {0x25CE, 0x25D1}, {0x25E2, 0x25E5}, + {0x25EF, 0x25EF}, {0x2605, 0x2606}, {0x2609, 0x2609}, + {0x260E, 0x260F}, {0x261C, 0x261C}, {0x261E, 0x261E}, + {0x2640, 0x2640}, {0x2642, 0x2642}, {0x2660, 0x2661}, + {0x2663, 0x2665}, {0x2667, 0x266A}, {0x266C, 0x266D}, + {0x266F, 0x266F}, {0x269E, 0x269F}, {0x26BF, 0x26BF}, + {0x26C6, 0x26CD}, {0x26CF, 0x26D3}, {0x26D5, 0x26E1}, + {0x26E3, 0x26E3}, {0x26E8, 0x26E9}, {0x26EB, 0x26F1}, + {0x26F4, 0x26F4}, {0x26F6, 0x26F9}, {0x26FB, 0x26FC}, + {0x26FE, 0x26FF}, {0x273D, 0x273D}, {0x2776, 0x277F}, + {0x2B56, 0x2B59}, {0x3248, 0x324F}, {0xE000, 0xF8FF}, + {0xFE00, 0xFE0F}, {0xFFFD, 0xFFFD}, {0x1F100, 0x1F10A}, + {0x1F110, 0x1F12D}, {0x1F130, 0x1F169}, {0x1F170, 0x1F18D}, + {0x1F18F, 0x1F190}, {0x1F19B, 0x1F1AC}, {0xE0100, 0xE01EF}, + {0xF0000, 0xFFFFD}, {0x100000, 0x10FFFD}, +} + +var emoji = table{ + {0x1F1E6, 0x1F1FF}, {0x1F321, 0x1F321}, {0x1F324, 0x1F32C}, + {0x1F336, 0x1F336}, {0x1F37D, 0x1F37D}, {0x1F396, 0x1F397}, + {0x1F399, 0x1F39B}, {0x1F39E, 0x1F39F}, {0x1F3CB, 0x1F3CE}, + {0x1F3D4, 0x1F3DF}, {0x1F3F3, 0x1F3F5}, {0x1F3F7, 0x1F3F7}, + {0x1F43F, 0x1F43F}, {0x1F441, 0x1F441}, {0x1F4FD, 0x1F4FD}, + {0x1F549, 0x1F54A}, {0x1F56F, 0x1F570}, {0x1F573, 0x1F579}, + {0x1F587, 0x1F587}, {0x1F58A, 0x1F58D}, {0x1F590, 0x1F590}, + {0x1F5A5, 0x1F5A5}, {0x1F5A8, 0x1F5A8}, {0x1F5B1, 0x1F5B2}, + {0x1F5BC, 0x1F5BC}, {0x1F5C2, 0x1F5C4}, {0x1F5D1, 0x1F5D3}, + {0x1F5DC, 0x1F5DE}, {0x1F5E1, 0x1F5E1}, {0x1F5E3, 0x1F5E3}, + {0x1F5E8, 0x1F5E8}, {0x1F5EF, 0x1F5EF}, {0x1F5F3, 0x1F5F3}, + {0x1F5FA, 0x1F5FA}, {0x1F6CB, 0x1F6CF}, {0x1F6E0, 0x1F6E5}, + {0x1F6E9, 0x1F6E9}, {0x1F6F0, 0x1F6F0}, {0x1F6F3, 0x1F6F3}, +} + +var notassigned = table{ + {0x0378, 0x0379}, {0x0380, 0x0383}, {0x038B, 0x038B}, + {0x038D, 0x038D}, {0x03A2, 0x03A2}, {0x0530, 0x0530}, + {0x0557, 0x0558}, {0x0560, 0x0560}, {0x0588, 0x0588}, + {0x058B, 0x058C}, {0x0590, 0x0590}, {0x05C8, 0x05CF}, + {0x05EB, 0x05EF}, {0x05F5, 0x05FF}, {0x061D, 0x061D}, + {0x070E, 0x070E}, {0x074B, 0x074C}, {0x07B2, 0x07BF}, + {0x07FB, 0x07FF}, {0x082E, 0x082F}, {0x083F, 
0x083F}, + {0x085C, 0x085D}, {0x085F, 0x089F}, {0x08B5, 0x08B5}, + {0x08BE, 0x08D3}, {0x0984, 0x0984}, {0x098D, 0x098E}, + {0x0991, 0x0992}, {0x09A9, 0x09A9}, {0x09B1, 0x09B1}, + {0x09B3, 0x09B5}, {0x09BA, 0x09BB}, {0x09C5, 0x09C6}, + {0x09C9, 0x09CA}, {0x09CF, 0x09D6}, {0x09D8, 0x09DB}, + {0x09DE, 0x09DE}, {0x09E4, 0x09E5}, {0x09FC, 0x0A00}, + {0x0A04, 0x0A04}, {0x0A0B, 0x0A0E}, {0x0A11, 0x0A12}, + {0x0A29, 0x0A29}, {0x0A31, 0x0A31}, {0x0A34, 0x0A34}, + {0x0A37, 0x0A37}, {0x0A3A, 0x0A3B}, {0x0A3D, 0x0A3D}, + {0x0A43, 0x0A46}, {0x0A49, 0x0A4A}, {0x0A4E, 0x0A50}, + {0x0A52, 0x0A58}, {0x0A5D, 0x0A5D}, {0x0A5F, 0x0A65}, + {0x0A76, 0x0A80}, {0x0A84, 0x0A84}, {0x0A8E, 0x0A8E}, + {0x0A92, 0x0A92}, {0x0AA9, 0x0AA9}, {0x0AB1, 0x0AB1}, + {0x0AB4, 0x0AB4}, {0x0ABA, 0x0ABB}, {0x0AC6, 0x0AC6}, + {0x0ACA, 0x0ACA}, {0x0ACE, 0x0ACF}, {0x0AD1, 0x0ADF}, + {0x0AE4, 0x0AE5}, {0x0AF2, 0x0AF8}, {0x0AFA, 0x0B00}, + {0x0B04, 0x0B04}, {0x0B0D, 0x0B0E}, {0x0B11, 0x0B12}, + {0x0B29, 0x0B29}, {0x0B31, 0x0B31}, {0x0B34, 0x0B34}, + {0x0B3A, 0x0B3B}, {0x0B45, 0x0B46}, {0x0B49, 0x0B4A}, + {0x0B4E, 0x0B55}, {0x0B58, 0x0B5B}, {0x0B5E, 0x0B5E}, + {0x0B64, 0x0B65}, {0x0B78, 0x0B81}, {0x0B84, 0x0B84}, + {0x0B8B, 0x0B8D}, {0x0B91, 0x0B91}, {0x0B96, 0x0B98}, + {0x0B9B, 0x0B9B}, {0x0B9D, 0x0B9D}, {0x0BA0, 0x0BA2}, + {0x0BA5, 0x0BA7}, {0x0BAB, 0x0BAD}, {0x0BBA, 0x0BBD}, + {0x0BC3, 0x0BC5}, {0x0BC9, 0x0BC9}, {0x0BCE, 0x0BCF}, + {0x0BD1, 0x0BD6}, {0x0BD8, 0x0BE5}, {0x0BFB, 0x0BFF}, + {0x0C04, 0x0C04}, {0x0C0D, 0x0C0D}, {0x0C11, 0x0C11}, + {0x0C29, 0x0C29}, {0x0C3A, 0x0C3C}, {0x0C45, 0x0C45}, + {0x0C49, 0x0C49}, {0x0C4E, 0x0C54}, {0x0C57, 0x0C57}, + {0x0C5B, 0x0C5F}, {0x0C64, 0x0C65}, {0x0C70, 0x0C77}, + {0x0C84, 0x0C84}, {0x0C8D, 0x0C8D}, {0x0C91, 0x0C91}, + {0x0CA9, 0x0CA9}, {0x0CB4, 0x0CB4}, {0x0CBA, 0x0CBB}, + {0x0CC5, 0x0CC5}, {0x0CC9, 0x0CC9}, {0x0CCE, 0x0CD4}, + {0x0CD7, 0x0CDD}, {0x0CDF, 0x0CDF}, {0x0CE4, 0x0CE5}, + {0x0CF0, 0x0CF0}, {0x0CF3, 0x0D00}, {0x0D04, 0x0D04}, + {0x0D0D, 0x0D0D}, {0x0D11, 0x0D11}, {0x0D3B, 0x0D3C}, + {0x0D45, 0x0D45}, {0x0D49, 0x0D49}, {0x0D50, 0x0D53}, + {0x0D64, 0x0D65}, {0x0D80, 0x0D81}, {0x0D84, 0x0D84}, + {0x0D97, 0x0D99}, {0x0DB2, 0x0DB2}, {0x0DBC, 0x0DBC}, + {0x0DBE, 0x0DBF}, {0x0DC7, 0x0DC9}, {0x0DCB, 0x0DCE}, + {0x0DD5, 0x0DD5}, {0x0DD7, 0x0DD7}, {0x0DE0, 0x0DE5}, + {0x0DF0, 0x0DF1}, {0x0DF5, 0x0E00}, {0x0E3B, 0x0E3E}, + {0x0E5C, 0x0E80}, {0x0E83, 0x0E83}, {0x0E85, 0x0E86}, + {0x0E89, 0x0E89}, {0x0E8B, 0x0E8C}, {0x0E8E, 0x0E93}, + {0x0E98, 0x0E98}, {0x0EA0, 0x0EA0}, {0x0EA4, 0x0EA4}, + {0x0EA6, 0x0EA6}, {0x0EA8, 0x0EA9}, {0x0EAC, 0x0EAC}, + {0x0EBA, 0x0EBA}, {0x0EBE, 0x0EBF}, {0x0EC5, 0x0EC5}, + {0x0EC7, 0x0EC7}, {0x0ECE, 0x0ECF}, {0x0EDA, 0x0EDB}, + {0x0EE0, 0x0EFF}, {0x0F48, 0x0F48}, {0x0F6D, 0x0F70}, + {0x0F98, 0x0F98}, {0x0FBD, 0x0FBD}, {0x0FCD, 0x0FCD}, + {0x0FDB, 0x0FFF}, {0x10C6, 0x10C6}, {0x10C8, 0x10CC}, + {0x10CE, 0x10CF}, {0x1249, 0x1249}, {0x124E, 0x124F}, + {0x1257, 0x1257}, {0x1259, 0x1259}, {0x125E, 0x125F}, + {0x1289, 0x1289}, {0x128E, 0x128F}, {0x12B1, 0x12B1}, + {0x12B6, 0x12B7}, {0x12BF, 0x12BF}, {0x12C1, 0x12C1}, + {0x12C6, 0x12C7}, {0x12D7, 0x12D7}, {0x1311, 0x1311}, + {0x1316, 0x1317}, {0x135B, 0x135C}, {0x137D, 0x137F}, + {0x139A, 0x139F}, {0x13F6, 0x13F7}, {0x13FE, 0x13FF}, + {0x169D, 0x169F}, {0x16F9, 0x16FF}, {0x170D, 0x170D}, + {0x1715, 0x171F}, {0x1737, 0x173F}, {0x1754, 0x175F}, + {0x176D, 0x176D}, {0x1771, 0x1771}, {0x1774, 0x177F}, + {0x17DE, 0x17DF}, {0x17EA, 0x17EF}, {0x17FA, 0x17FF}, + {0x180F, 0x180F}, {0x181A, 0x181F}, {0x1878, 0x187F}, + {0x18AB, 
0x18AF}, {0x18F6, 0x18FF}, {0x191F, 0x191F}, + {0x192C, 0x192F}, {0x193C, 0x193F}, {0x1941, 0x1943}, + {0x196E, 0x196F}, {0x1975, 0x197F}, {0x19AC, 0x19AF}, + {0x19CA, 0x19CF}, {0x19DB, 0x19DD}, {0x1A1C, 0x1A1D}, + {0x1A5F, 0x1A5F}, {0x1A7D, 0x1A7E}, {0x1A8A, 0x1A8F}, + {0x1A9A, 0x1A9F}, {0x1AAE, 0x1AAF}, {0x1ABF, 0x1AFF}, + {0x1B4C, 0x1B4F}, {0x1B7D, 0x1B7F}, {0x1BF4, 0x1BFB}, + {0x1C38, 0x1C3A}, {0x1C4A, 0x1C4C}, {0x1C89, 0x1CBF}, + {0x1CC8, 0x1CCF}, {0x1CF7, 0x1CF7}, {0x1CFA, 0x1CFF}, + {0x1DF6, 0x1DFA}, {0x1F16, 0x1F17}, {0x1F1E, 0x1F1F}, + {0x1F46, 0x1F47}, {0x1F4E, 0x1F4F}, {0x1F58, 0x1F58}, + {0x1F5A, 0x1F5A}, {0x1F5C, 0x1F5C}, {0x1F5E, 0x1F5E}, + {0x1F7E, 0x1F7F}, {0x1FB5, 0x1FB5}, {0x1FC5, 0x1FC5}, + {0x1FD4, 0x1FD5}, {0x1FDC, 0x1FDC}, {0x1FF0, 0x1FF1}, + {0x1FF5, 0x1FF5}, {0x1FFF, 0x1FFF}, {0x2065, 0x2065}, + {0x2072, 0x2073}, {0x208F, 0x208F}, {0x209D, 0x209F}, + {0x20BF, 0x20CF}, {0x20F1, 0x20FF}, {0x218C, 0x218F}, + {0x23FF, 0x23FF}, {0x2427, 0x243F}, {0x244B, 0x245F}, + {0x2B74, 0x2B75}, {0x2B96, 0x2B97}, {0x2BBA, 0x2BBC}, + {0x2BC9, 0x2BC9}, {0x2BD2, 0x2BEB}, {0x2BF0, 0x2BFF}, + {0x2C2F, 0x2C2F}, {0x2C5F, 0x2C5F}, {0x2CF4, 0x2CF8}, + {0x2D26, 0x2D26}, {0x2D28, 0x2D2C}, {0x2D2E, 0x2D2F}, + {0x2D68, 0x2D6E}, {0x2D71, 0x2D7E}, {0x2D97, 0x2D9F}, + {0x2DA7, 0x2DA7}, {0x2DAF, 0x2DAF}, {0x2DB7, 0x2DB7}, + {0x2DBF, 0x2DBF}, {0x2DC7, 0x2DC7}, {0x2DCF, 0x2DCF}, + {0x2DD7, 0x2DD7}, {0x2DDF, 0x2DDF}, {0x2E45, 0x2E7F}, + {0x2E9A, 0x2E9A}, {0x2EF4, 0x2EFF}, {0x2FD6, 0x2FEF}, + {0x2FFC, 0x2FFF}, {0x3040, 0x3040}, {0x3097, 0x3098}, + {0x3100, 0x3104}, {0x312E, 0x3130}, {0x318F, 0x318F}, + {0x31BB, 0x31BF}, {0x31E4, 0x31EF}, {0x321F, 0x321F}, + {0x32FF, 0x32FF}, {0x4DB6, 0x4DBF}, {0x9FD6, 0x9FFF}, + {0xA48D, 0xA48F}, {0xA4C7, 0xA4CF}, {0xA62C, 0xA63F}, + {0xA6F8, 0xA6FF}, {0xA7AF, 0xA7AF}, {0xA7B8, 0xA7F6}, + {0xA82C, 0xA82F}, {0xA83A, 0xA83F}, {0xA878, 0xA87F}, + {0xA8C6, 0xA8CD}, {0xA8DA, 0xA8DF}, {0xA8FE, 0xA8FF}, + {0xA954, 0xA95E}, {0xA97D, 0xA97F}, {0xA9CE, 0xA9CE}, + {0xA9DA, 0xA9DD}, {0xA9FF, 0xA9FF}, {0xAA37, 0xAA3F}, + {0xAA4E, 0xAA4F}, {0xAA5A, 0xAA5B}, {0xAAC3, 0xAADA}, + {0xAAF7, 0xAB00}, {0xAB07, 0xAB08}, {0xAB0F, 0xAB10}, + {0xAB17, 0xAB1F}, {0xAB27, 0xAB27}, {0xAB2F, 0xAB2F}, + {0xAB66, 0xAB6F}, {0xABEE, 0xABEF}, {0xABFA, 0xABFF}, + {0xD7A4, 0xD7AF}, {0xD7C7, 0xD7CA}, {0xD7FC, 0xD7FF}, + {0xFA6E, 0xFA6F}, {0xFADA, 0xFAFF}, {0xFB07, 0xFB12}, + {0xFB18, 0xFB1C}, {0xFB37, 0xFB37}, {0xFB3D, 0xFB3D}, + {0xFB3F, 0xFB3F}, {0xFB42, 0xFB42}, {0xFB45, 0xFB45}, + {0xFBC2, 0xFBD2}, {0xFD40, 0xFD4F}, {0xFD90, 0xFD91}, + {0xFDC8, 0xFDEF}, {0xFDFE, 0xFDFF}, {0xFE1A, 0xFE1F}, + {0xFE53, 0xFE53}, {0xFE67, 0xFE67}, {0xFE6C, 0xFE6F}, + {0xFE75, 0xFE75}, {0xFEFD, 0xFEFE}, {0xFF00, 0xFF00}, + {0xFFBF, 0xFFC1}, {0xFFC8, 0xFFC9}, {0xFFD0, 0xFFD1}, + {0xFFD8, 0xFFD9}, {0xFFDD, 0xFFDF}, {0xFFE7, 0xFFE7}, + {0xFFEF, 0xFFF8}, {0xFFFE, 0xFFFF}, {0x1000C, 0x1000C}, + {0x10027, 0x10027}, {0x1003B, 0x1003B}, {0x1003E, 0x1003E}, + {0x1004E, 0x1004F}, {0x1005E, 0x1007F}, {0x100FB, 0x100FF}, + {0x10103, 0x10106}, {0x10134, 0x10136}, {0x1018F, 0x1018F}, + {0x1019C, 0x1019F}, {0x101A1, 0x101CF}, {0x101FE, 0x1027F}, + {0x1029D, 0x1029F}, {0x102D1, 0x102DF}, {0x102FC, 0x102FF}, + {0x10324, 0x1032F}, {0x1034B, 0x1034F}, {0x1037B, 0x1037F}, + {0x1039E, 0x1039E}, {0x103C4, 0x103C7}, {0x103D6, 0x103FF}, + {0x1049E, 0x1049F}, {0x104AA, 0x104AF}, {0x104D4, 0x104D7}, + {0x104FC, 0x104FF}, {0x10528, 0x1052F}, {0x10564, 0x1056E}, + {0x10570, 0x105FF}, {0x10737, 0x1073F}, {0x10756, 0x1075F}, + {0x10768, 0x107FF}, {0x10806, 
0x10807}, {0x10809, 0x10809}, + {0x10836, 0x10836}, {0x10839, 0x1083B}, {0x1083D, 0x1083E}, + {0x10856, 0x10856}, {0x1089F, 0x108A6}, {0x108B0, 0x108DF}, + {0x108F3, 0x108F3}, {0x108F6, 0x108FA}, {0x1091C, 0x1091E}, + {0x1093A, 0x1093E}, {0x10940, 0x1097F}, {0x109B8, 0x109BB}, + {0x109D0, 0x109D1}, {0x10A04, 0x10A04}, {0x10A07, 0x10A0B}, + {0x10A14, 0x10A14}, {0x10A18, 0x10A18}, {0x10A34, 0x10A37}, + {0x10A3B, 0x10A3E}, {0x10A48, 0x10A4F}, {0x10A59, 0x10A5F}, + {0x10AA0, 0x10ABF}, {0x10AE7, 0x10AEA}, {0x10AF7, 0x10AFF}, + {0x10B36, 0x10B38}, {0x10B56, 0x10B57}, {0x10B73, 0x10B77}, + {0x10B92, 0x10B98}, {0x10B9D, 0x10BA8}, {0x10BB0, 0x10BFF}, + {0x10C49, 0x10C7F}, {0x10CB3, 0x10CBF}, {0x10CF3, 0x10CF9}, + {0x10D00, 0x10E5F}, {0x10E7F, 0x10FFF}, {0x1104E, 0x11051}, + {0x11070, 0x1107E}, {0x110C2, 0x110CF}, {0x110E9, 0x110EF}, + {0x110FA, 0x110FF}, {0x11135, 0x11135}, {0x11144, 0x1114F}, + {0x11177, 0x1117F}, {0x111CE, 0x111CF}, {0x111E0, 0x111E0}, + {0x111F5, 0x111FF}, {0x11212, 0x11212}, {0x1123F, 0x1127F}, + {0x11287, 0x11287}, {0x11289, 0x11289}, {0x1128E, 0x1128E}, + {0x1129E, 0x1129E}, {0x112AA, 0x112AF}, {0x112EB, 0x112EF}, + {0x112FA, 0x112FF}, {0x11304, 0x11304}, {0x1130D, 0x1130E}, + {0x11311, 0x11312}, {0x11329, 0x11329}, {0x11331, 0x11331}, + {0x11334, 0x11334}, {0x1133A, 0x1133B}, {0x11345, 0x11346}, + {0x11349, 0x1134A}, {0x1134E, 0x1134F}, {0x11351, 0x11356}, + {0x11358, 0x1135C}, {0x11364, 0x11365}, {0x1136D, 0x1136F}, + {0x11375, 0x113FF}, {0x1145A, 0x1145A}, {0x1145C, 0x1145C}, + {0x1145E, 0x1147F}, {0x114C8, 0x114CF}, {0x114DA, 0x1157F}, + {0x115B6, 0x115B7}, {0x115DE, 0x115FF}, {0x11645, 0x1164F}, + {0x1165A, 0x1165F}, {0x1166D, 0x1167F}, {0x116B8, 0x116BF}, + {0x116CA, 0x116FF}, {0x1171A, 0x1171C}, {0x1172C, 0x1172F}, + {0x11740, 0x1189F}, {0x118F3, 0x118FE}, {0x11900, 0x11ABF}, + {0x11AF9, 0x11BFF}, {0x11C09, 0x11C09}, {0x11C37, 0x11C37}, + {0x11C46, 0x11C4F}, {0x11C6D, 0x11C6F}, {0x11C90, 0x11C91}, + {0x11CA8, 0x11CA8}, {0x11CB7, 0x11FFF}, {0x1239A, 0x123FF}, + {0x1246F, 0x1246F}, {0x12475, 0x1247F}, {0x12544, 0x12FFF}, + {0x1342F, 0x143FF}, {0x14647, 0x167FF}, {0x16A39, 0x16A3F}, + {0x16A5F, 0x16A5F}, {0x16A6A, 0x16A6D}, {0x16A70, 0x16ACF}, + {0x16AEE, 0x16AEF}, {0x16AF6, 0x16AFF}, {0x16B46, 0x16B4F}, + {0x16B5A, 0x16B5A}, {0x16B62, 0x16B62}, {0x16B78, 0x16B7C}, + {0x16B90, 0x16EFF}, {0x16F45, 0x16F4F}, {0x16F7F, 0x16F8E}, + {0x16FA0, 0x16FDF}, {0x16FE1, 0x16FFF}, {0x187ED, 0x187FF}, + {0x18AF3, 0x1AFFF}, {0x1B002, 0x1BBFF}, {0x1BC6B, 0x1BC6F}, + {0x1BC7D, 0x1BC7F}, {0x1BC89, 0x1BC8F}, {0x1BC9A, 0x1BC9B}, + {0x1BCA4, 0x1CFFF}, {0x1D0F6, 0x1D0FF}, {0x1D127, 0x1D128}, + {0x1D1E9, 0x1D1FF}, {0x1D246, 0x1D2FF}, {0x1D357, 0x1D35F}, + {0x1D372, 0x1D3FF}, {0x1D455, 0x1D455}, {0x1D49D, 0x1D49D}, + {0x1D4A0, 0x1D4A1}, {0x1D4A3, 0x1D4A4}, {0x1D4A7, 0x1D4A8}, + {0x1D4AD, 0x1D4AD}, {0x1D4BA, 0x1D4BA}, {0x1D4BC, 0x1D4BC}, + {0x1D4C4, 0x1D4C4}, {0x1D506, 0x1D506}, {0x1D50B, 0x1D50C}, + {0x1D515, 0x1D515}, {0x1D51D, 0x1D51D}, {0x1D53A, 0x1D53A}, + {0x1D53F, 0x1D53F}, {0x1D545, 0x1D545}, {0x1D547, 0x1D549}, + {0x1D551, 0x1D551}, {0x1D6A6, 0x1D6A7}, {0x1D7CC, 0x1D7CD}, + {0x1DA8C, 0x1DA9A}, {0x1DAA0, 0x1DAA0}, {0x1DAB0, 0x1DFFF}, + {0x1E007, 0x1E007}, {0x1E019, 0x1E01A}, {0x1E022, 0x1E022}, + {0x1E025, 0x1E025}, {0x1E02B, 0x1E7FF}, {0x1E8C5, 0x1E8C6}, + {0x1E8D7, 0x1E8FF}, {0x1E94B, 0x1E94F}, {0x1E95A, 0x1E95D}, + {0x1E960, 0x1EDFF}, {0x1EE04, 0x1EE04}, {0x1EE20, 0x1EE20}, + {0x1EE23, 0x1EE23}, {0x1EE25, 0x1EE26}, {0x1EE28, 0x1EE28}, + {0x1EE33, 0x1EE33}, {0x1EE38, 0x1EE38}, {0x1EE3A, 
0x1EE3A}, + {0x1EE3C, 0x1EE41}, {0x1EE43, 0x1EE46}, {0x1EE48, 0x1EE48}, + {0x1EE4A, 0x1EE4A}, {0x1EE4C, 0x1EE4C}, {0x1EE50, 0x1EE50}, + {0x1EE53, 0x1EE53}, {0x1EE55, 0x1EE56}, {0x1EE58, 0x1EE58}, + {0x1EE5A, 0x1EE5A}, {0x1EE5C, 0x1EE5C}, {0x1EE5E, 0x1EE5E}, + {0x1EE60, 0x1EE60}, {0x1EE63, 0x1EE63}, {0x1EE65, 0x1EE66}, + {0x1EE6B, 0x1EE6B}, {0x1EE73, 0x1EE73}, {0x1EE78, 0x1EE78}, + {0x1EE7D, 0x1EE7D}, {0x1EE7F, 0x1EE7F}, {0x1EE8A, 0x1EE8A}, + {0x1EE9C, 0x1EEA0}, {0x1EEA4, 0x1EEA4}, {0x1EEAA, 0x1EEAA}, + {0x1EEBC, 0x1EEEF}, {0x1EEF2, 0x1EFFF}, {0x1F02C, 0x1F02F}, + {0x1F094, 0x1F09F}, {0x1F0AF, 0x1F0B0}, {0x1F0C0, 0x1F0C0}, + {0x1F0D0, 0x1F0D0}, {0x1F0F6, 0x1F0FF}, {0x1F10D, 0x1F10F}, + {0x1F12F, 0x1F12F}, {0x1F16C, 0x1F16F}, {0x1F1AD, 0x1F1E5}, + {0x1F203, 0x1F20F}, {0x1F23C, 0x1F23F}, {0x1F249, 0x1F24F}, + {0x1F252, 0x1F2FF}, {0x1F6D3, 0x1F6DF}, {0x1F6ED, 0x1F6EF}, + {0x1F6F7, 0x1F6FF}, {0x1F774, 0x1F77F}, {0x1F7D5, 0x1F7FF}, + {0x1F80C, 0x1F80F}, {0x1F848, 0x1F84F}, {0x1F85A, 0x1F85F}, + {0x1F888, 0x1F88F}, {0x1F8AE, 0x1F90F}, {0x1F91F, 0x1F91F}, + {0x1F928, 0x1F92F}, {0x1F931, 0x1F932}, {0x1F93F, 0x1F93F}, + {0x1F94C, 0x1F94F}, {0x1F95F, 0x1F97F}, {0x1F992, 0x1F9BF}, + {0x1F9C1, 0x1FFFF}, {0x2A6D7, 0x2A6FF}, {0x2B735, 0x2B73F}, + {0x2B81E, 0x2B81F}, {0x2CEA2, 0x2F7FF}, {0x2FA1E, 0xE0000}, + {0xE0002, 0xE001F}, {0xE0080, 0xE00FF}, {0xE01F0, 0xEFFFF}, + {0xFFFFE, 0xFFFFF}, +} + +var neutral = table{ + {0x0000, 0x001F}, {0x007F, 0x007F}, {0x0080, 0x009F}, + {0x00A0, 0x00A0}, {0x00A9, 0x00A9}, {0x00AB, 0x00AB}, + {0x00B5, 0x00B5}, {0x00BB, 0x00BB}, {0x00C0, 0x00C5}, + {0x00C7, 0x00CF}, {0x00D1, 0x00D6}, {0x00D9, 0x00DD}, + {0x00E2, 0x00E5}, {0x00E7, 0x00E7}, {0x00EB, 0x00EB}, + {0x00EE, 0x00EF}, {0x00F1, 0x00F1}, {0x00F4, 0x00F6}, + {0x00FB, 0x00FB}, {0x00FD, 0x00FD}, {0x00FF, 0x00FF}, + {0x0100, 0x0100}, {0x0102, 0x0110}, {0x0112, 0x0112}, + {0x0114, 0x011A}, {0x011C, 0x0125}, {0x0128, 0x012A}, + {0x012C, 0x0130}, {0x0134, 0x0137}, {0x0139, 0x013E}, + {0x0143, 0x0143}, {0x0145, 0x0147}, {0x014C, 0x014C}, + {0x014E, 0x0151}, {0x0154, 0x0165}, {0x0168, 0x016A}, + {0x016C, 0x017F}, {0x0180, 0x01BA}, {0x01BB, 0x01BB}, + {0x01BC, 0x01BF}, {0x01C0, 0x01C3}, {0x01C4, 0x01CD}, + {0x01CF, 0x01CF}, {0x01D1, 0x01D1}, {0x01D3, 0x01D3}, + {0x01D5, 0x01D5}, {0x01D7, 0x01D7}, {0x01D9, 0x01D9}, + {0x01DB, 0x01DB}, {0x01DD, 0x024F}, {0x0250, 0x0250}, + {0x0252, 0x0260}, {0x0262, 0x0293}, {0x0294, 0x0294}, + {0x0295, 0x02AF}, {0x02B0, 0x02C1}, {0x02C2, 0x02C3}, + {0x02C5, 0x02C5}, {0x02C6, 0x02C6}, {0x02C8, 0x02C8}, + {0x02CC, 0x02CC}, {0x02CE, 0x02CF}, {0x02D1, 0x02D1}, + {0x02D2, 0x02D7}, {0x02DC, 0x02DC}, {0x02DE, 0x02DE}, + {0x02E0, 0x02E4}, {0x02E5, 0x02EB}, {0x02EC, 0x02EC}, + {0x02ED, 0x02ED}, {0x02EE, 0x02EE}, {0x02EF, 0x02FF}, + {0x0370, 0x0373}, {0x0374, 0x0374}, {0x0375, 0x0375}, + {0x0376, 0x0377}, {0x037A, 0x037A}, {0x037B, 0x037D}, + {0x037E, 0x037E}, {0x037F, 0x037F}, {0x0384, 0x0385}, + {0x0386, 0x0386}, {0x0387, 0x0387}, {0x0388, 0x038A}, + {0x038C, 0x038C}, {0x038E, 0x0390}, {0x03AA, 0x03B0}, + {0x03C2, 0x03C2}, {0x03CA, 0x03F5}, {0x03F6, 0x03F6}, + {0x03F7, 0x03FF}, {0x0400, 0x0400}, {0x0402, 0x040F}, + {0x0450, 0x0450}, {0x0452, 0x0481}, {0x0482, 0x0482}, + {0x0483, 0x0487}, {0x0488, 0x0489}, {0x048A, 0x04FF}, + {0x0500, 0x052F}, {0x0531, 0x0556}, {0x0559, 0x0559}, + {0x055A, 0x055F}, {0x0561, 0x0587}, {0x0589, 0x0589}, + {0x058A, 0x058A}, {0x058D, 0x058E}, {0x058F, 0x058F}, + {0x0591, 0x05BD}, {0x05BE, 0x05BE}, {0x05BF, 0x05BF}, + {0x05C0, 0x05C0}, {0x05C1, 0x05C2}, {0x05C3, 0x05C3}, + 
{0x05C4, 0x05C5}, {0x05C6, 0x05C6}, {0x05C7, 0x05C7}, + {0x05D0, 0x05EA}, {0x05F0, 0x05F2}, {0x05F3, 0x05F4}, + {0x0600, 0x0605}, {0x0606, 0x0608}, {0x0609, 0x060A}, + {0x060B, 0x060B}, {0x060C, 0x060D}, {0x060E, 0x060F}, + {0x0610, 0x061A}, {0x061B, 0x061B}, {0x061C, 0x061C}, + {0x061E, 0x061F}, {0x0620, 0x063F}, {0x0640, 0x0640}, + {0x0641, 0x064A}, {0x064B, 0x065F}, {0x0660, 0x0669}, + {0x066A, 0x066D}, {0x066E, 0x066F}, {0x0670, 0x0670}, + {0x0671, 0x06D3}, {0x06D4, 0x06D4}, {0x06D5, 0x06D5}, + {0x06D6, 0x06DC}, {0x06DD, 0x06DD}, {0x06DE, 0x06DE}, + {0x06DF, 0x06E4}, {0x06E5, 0x06E6}, {0x06E7, 0x06E8}, + {0x06E9, 0x06E9}, {0x06EA, 0x06ED}, {0x06EE, 0x06EF}, + {0x06F0, 0x06F9}, {0x06FA, 0x06FC}, {0x06FD, 0x06FE}, + {0x06FF, 0x06FF}, {0x0700, 0x070D}, {0x070F, 0x070F}, + {0x0710, 0x0710}, {0x0711, 0x0711}, {0x0712, 0x072F}, + {0x0730, 0x074A}, {0x074D, 0x074F}, {0x0750, 0x077F}, + {0x0780, 0x07A5}, {0x07A6, 0x07B0}, {0x07B1, 0x07B1}, + {0x07C0, 0x07C9}, {0x07CA, 0x07EA}, {0x07EB, 0x07F3}, + {0x07F4, 0x07F5}, {0x07F6, 0x07F6}, {0x07F7, 0x07F9}, + {0x07FA, 0x07FA}, {0x0800, 0x0815}, {0x0816, 0x0819}, + {0x081A, 0x081A}, {0x081B, 0x0823}, {0x0824, 0x0824}, + {0x0825, 0x0827}, {0x0828, 0x0828}, {0x0829, 0x082D}, + {0x0830, 0x083E}, {0x0840, 0x0858}, {0x0859, 0x085B}, + {0x085E, 0x085E}, {0x08A0, 0x08B4}, {0x08B6, 0x08BD}, + {0x08D4, 0x08E1}, {0x08E2, 0x08E2}, {0x08E3, 0x08FF}, + {0x0900, 0x0902}, {0x0903, 0x0903}, {0x0904, 0x0939}, + {0x093A, 0x093A}, {0x093B, 0x093B}, {0x093C, 0x093C}, + {0x093D, 0x093D}, {0x093E, 0x0940}, {0x0941, 0x0948}, + {0x0949, 0x094C}, {0x094D, 0x094D}, {0x094E, 0x094F}, + {0x0950, 0x0950}, {0x0951, 0x0957}, {0x0958, 0x0961}, + {0x0962, 0x0963}, {0x0964, 0x0965}, {0x0966, 0x096F}, + {0x0970, 0x0970}, {0x0971, 0x0971}, {0x0972, 0x097F}, + {0x0980, 0x0980}, {0x0981, 0x0981}, {0x0982, 0x0983}, + {0x0985, 0x098C}, {0x098F, 0x0990}, {0x0993, 0x09A8}, + {0x09AA, 0x09B0}, {0x09B2, 0x09B2}, {0x09B6, 0x09B9}, + {0x09BC, 0x09BC}, {0x09BD, 0x09BD}, {0x09BE, 0x09C0}, + {0x09C1, 0x09C4}, {0x09C7, 0x09C8}, {0x09CB, 0x09CC}, + {0x09CD, 0x09CD}, {0x09CE, 0x09CE}, {0x09D7, 0x09D7}, + {0x09DC, 0x09DD}, {0x09DF, 0x09E1}, {0x09E2, 0x09E3}, + {0x09E6, 0x09EF}, {0x09F0, 0x09F1}, {0x09F2, 0x09F3}, + {0x09F4, 0x09F9}, {0x09FA, 0x09FA}, {0x09FB, 0x09FB}, + {0x0A01, 0x0A02}, {0x0A03, 0x0A03}, {0x0A05, 0x0A0A}, + {0x0A0F, 0x0A10}, {0x0A13, 0x0A28}, {0x0A2A, 0x0A30}, + {0x0A32, 0x0A33}, {0x0A35, 0x0A36}, {0x0A38, 0x0A39}, + {0x0A3C, 0x0A3C}, {0x0A3E, 0x0A40}, {0x0A41, 0x0A42}, + {0x0A47, 0x0A48}, {0x0A4B, 0x0A4D}, {0x0A51, 0x0A51}, + {0x0A59, 0x0A5C}, {0x0A5E, 0x0A5E}, {0x0A66, 0x0A6F}, + {0x0A70, 0x0A71}, {0x0A72, 0x0A74}, {0x0A75, 0x0A75}, + {0x0A81, 0x0A82}, {0x0A83, 0x0A83}, {0x0A85, 0x0A8D}, + {0x0A8F, 0x0A91}, {0x0A93, 0x0AA8}, {0x0AAA, 0x0AB0}, + {0x0AB2, 0x0AB3}, {0x0AB5, 0x0AB9}, {0x0ABC, 0x0ABC}, + {0x0ABD, 0x0ABD}, {0x0ABE, 0x0AC0}, {0x0AC1, 0x0AC5}, + {0x0AC7, 0x0AC8}, {0x0AC9, 0x0AC9}, {0x0ACB, 0x0ACC}, + {0x0ACD, 0x0ACD}, {0x0AD0, 0x0AD0}, {0x0AE0, 0x0AE1}, + {0x0AE2, 0x0AE3}, {0x0AE6, 0x0AEF}, {0x0AF0, 0x0AF0}, + {0x0AF1, 0x0AF1}, {0x0AF9, 0x0AF9}, {0x0B01, 0x0B01}, + {0x0B02, 0x0B03}, {0x0B05, 0x0B0C}, {0x0B0F, 0x0B10}, + {0x0B13, 0x0B28}, {0x0B2A, 0x0B30}, {0x0B32, 0x0B33}, + {0x0B35, 0x0B39}, {0x0B3C, 0x0B3C}, {0x0B3D, 0x0B3D}, + {0x0B3E, 0x0B3E}, {0x0B3F, 0x0B3F}, {0x0B40, 0x0B40}, + {0x0B41, 0x0B44}, {0x0B47, 0x0B48}, {0x0B4B, 0x0B4C}, + {0x0B4D, 0x0B4D}, {0x0B56, 0x0B56}, {0x0B57, 0x0B57}, + {0x0B5C, 0x0B5D}, {0x0B5F, 0x0B61}, {0x0B62, 0x0B63}, + {0x0B66, 0x0B6F}, {0x0B70, 
0x0B70}, {0x0B71, 0x0B71}, + {0x0B72, 0x0B77}, {0x0B82, 0x0B82}, {0x0B83, 0x0B83}, + {0x0B85, 0x0B8A}, {0x0B8E, 0x0B90}, {0x0B92, 0x0B95}, + {0x0B99, 0x0B9A}, {0x0B9C, 0x0B9C}, {0x0B9E, 0x0B9F}, + {0x0BA3, 0x0BA4}, {0x0BA8, 0x0BAA}, {0x0BAE, 0x0BB9}, + {0x0BBE, 0x0BBF}, {0x0BC0, 0x0BC0}, {0x0BC1, 0x0BC2}, + {0x0BC6, 0x0BC8}, {0x0BCA, 0x0BCC}, {0x0BCD, 0x0BCD}, + {0x0BD0, 0x0BD0}, {0x0BD7, 0x0BD7}, {0x0BE6, 0x0BEF}, + {0x0BF0, 0x0BF2}, {0x0BF3, 0x0BF8}, {0x0BF9, 0x0BF9}, + {0x0BFA, 0x0BFA}, {0x0C00, 0x0C00}, {0x0C01, 0x0C03}, + {0x0C05, 0x0C0C}, {0x0C0E, 0x0C10}, {0x0C12, 0x0C28}, + {0x0C2A, 0x0C39}, {0x0C3D, 0x0C3D}, {0x0C3E, 0x0C40}, + {0x0C41, 0x0C44}, {0x0C46, 0x0C48}, {0x0C4A, 0x0C4D}, + {0x0C55, 0x0C56}, {0x0C58, 0x0C5A}, {0x0C60, 0x0C61}, + {0x0C62, 0x0C63}, {0x0C66, 0x0C6F}, {0x0C78, 0x0C7E}, + {0x0C7F, 0x0C7F}, {0x0C80, 0x0C80}, {0x0C81, 0x0C81}, + {0x0C82, 0x0C83}, {0x0C85, 0x0C8C}, {0x0C8E, 0x0C90}, + {0x0C92, 0x0CA8}, {0x0CAA, 0x0CB3}, {0x0CB5, 0x0CB9}, + {0x0CBC, 0x0CBC}, {0x0CBD, 0x0CBD}, {0x0CBE, 0x0CBE}, + {0x0CBF, 0x0CBF}, {0x0CC0, 0x0CC4}, {0x0CC6, 0x0CC6}, + {0x0CC7, 0x0CC8}, {0x0CCA, 0x0CCB}, {0x0CCC, 0x0CCD}, + {0x0CD5, 0x0CD6}, {0x0CDE, 0x0CDE}, {0x0CE0, 0x0CE1}, + {0x0CE2, 0x0CE3}, {0x0CE6, 0x0CEF}, {0x0CF1, 0x0CF2}, + {0x0D01, 0x0D01}, {0x0D02, 0x0D03}, {0x0D05, 0x0D0C}, + {0x0D0E, 0x0D10}, {0x0D12, 0x0D3A}, {0x0D3D, 0x0D3D}, + {0x0D3E, 0x0D40}, {0x0D41, 0x0D44}, {0x0D46, 0x0D48}, + {0x0D4A, 0x0D4C}, {0x0D4D, 0x0D4D}, {0x0D4E, 0x0D4E}, + {0x0D4F, 0x0D4F}, {0x0D54, 0x0D56}, {0x0D57, 0x0D57}, + {0x0D58, 0x0D5E}, {0x0D5F, 0x0D61}, {0x0D62, 0x0D63}, + {0x0D66, 0x0D6F}, {0x0D70, 0x0D78}, {0x0D79, 0x0D79}, + {0x0D7A, 0x0D7F}, {0x0D82, 0x0D83}, {0x0D85, 0x0D96}, + {0x0D9A, 0x0DB1}, {0x0DB3, 0x0DBB}, {0x0DBD, 0x0DBD}, + {0x0DC0, 0x0DC6}, {0x0DCA, 0x0DCA}, {0x0DCF, 0x0DD1}, + {0x0DD2, 0x0DD4}, {0x0DD6, 0x0DD6}, {0x0DD8, 0x0DDF}, + {0x0DE6, 0x0DEF}, {0x0DF2, 0x0DF3}, {0x0DF4, 0x0DF4}, + {0x0E01, 0x0E30}, {0x0E31, 0x0E31}, {0x0E32, 0x0E33}, + {0x0E34, 0x0E3A}, {0x0E3F, 0x0E3F}, {0x0E40, 0x0E45}, + {0x0E46, 0x0E46}, {0x0E47, 0x0E4E}, {0x0E4F, 0x0E4F}, + {0x0E50, 0x0E59}, {0x0E5A, 0x0E5B}, {0x0E81, 0x0E82}, + {0x0E84, 0x0E84}, {0x0E87, 0x0E88}, {0x0E8A, 0x0E8A}, + {0x0E8D, 0x0E8D}, {0x0E94, 0x0E97}, {0x0E99, 0x0E9F}, + {0x0EA1, 0x0EA3}, {0x0EA5, 0x0EA5}, {0x0EA7, 0x0EA7}, + {0x0EAA, 0x0EAB}, {0x0EAD, 0x0EB0}, {0x0EB1, 0x0EB1}, + {0x0EB2, 0x0EB3}, {0x0EB4, 0x0EB9}, {0x0EBB, 0x0EBC}, + {0x0EBD, 0x0EBD}, {0x0EC0, 0x0EC4}, {0x0EC6, 0x0EC6}, + {0x0EC8, 0x0ECD}, {0x0ED0, 0x0ED9}, {0x0EDC, 0x0EDF}, + {0x0F00, 0x0F00}, {0x0F01, 0x0F03}, {0x0F04, 0x0F12}, + {0x0F13, 0x0F13}, {0x0F14, 0x0F14}, {0x0F15, 0x0F17}, + {0x0F18, 0x0F19}, {0x0F1A, 0x0F1F}, {0x0F20, 0x0F29}, + {0x0F2A, 0x0F33}, {0x0F34, 0x0F34}, {0x0F35, 0x0F35}, + {0x0F36, 0x0F36}, {0x0F37, 0x0F37}, {0x0F38, 0x0F38}, + {0x0F39, 0x0F39}, {0x0F3A, 0x0F3A}, {0x0F3B, 0x0F3B}, + {0x0F3C, 0x0F3C}, {0x0F3D, 0x0F3D}, {0x0F3E, 0x0F3F}, + {0x0F40, 0x0F47}, {0x0F49, 0x0F6C}, {0x0F71, 0x0F7E}, + {0x0F7F, 0x0F7F}, {0x0F80, 0x0F84}, {0x0F85, 0x0F85}, + {0x0F86, 0x0F87}, {0x0F88, 0x0F8C}, {0x0F8D, 0x0F97}, + {0x0F99, 0x0FBC}, {0x0FBE, 0x0FC5}, {0x0FC6, 0x0FC6}, + {0x0FC7, 0x0FCC}, {0x0FCE, 0x0FCF}, {0x0FD0, 0x0FD4}, + {0x0FD5, 0x0FD8}, {0x0FD9, 0x0FDA}, {0x1000, 0x102A}, + {0x102B, 0x102C}, {0x102D, 0x1030}, {0x1031, 0x1031}, + {0x1032, 0x1037}, {0x1038, 0x1038}, {0x1039, 0x103A}, + {0x103B, 0x103C}, {0x103D, 0x103E}, {0x103F, 0x103F}, + {0x1040, 0x1049}, {0x104A, 0x104F}, {0x1050, 0x1055}, + {0x1056, 0x1057}, {0x1058, 0x1059}, {0x105A, 0x105D}, 
+ {0x105E, 0x1060}, {0x1061, 0x1061}, {0x1062, 0x1064}, + {0x1065, 0x1066}, {0x1067, 0x106D}, {0x106E, 0x1070}, + {0x1071, 0x1074}, {0x1075, 0x1081}, {0x1082, 0x1082}, + {0x1083, 0x1084}, {0x1085, 0x1086}, {0x1087, 0x108C}, + {0x108D, 0x108D}, {0x108E, 0x108E}, {0x108F, 0x108F}, + {0x1090, 0x1099}, {0x109A, 0x109C}, {0x109D, 0x109D}, + {0x109E, 0x109F}, {0x10A0, 0x10C5}, {0x10C7, 0x10C7}, + {0x10CD, 0x10CD}, {0x10D0, 0x10FA}, {0x10FB, 0x10FB}, + {0x10FC, 0x10FC}, {0x10FD, 0x10FF}, {0x1160, 0x11FF}, + {0x1200, 0x1248}, {0x124A, 0x124D}, {0x1250, 0x1256}, + {0x1258, 0x1258}, {0x125A, 0x125D}, {0x1260, 0x1288}, + {0x128A, 0x128D}, {0x1290, 0x12B0}, {0x12B2, 0x12B5}, + {0x12B8, 0x12BE}, {0x12C0, 0x12C0}, {0x12C2, 0x12C5}, + {0x12C8, 0x12D6}, {0x12D8, 0x1310}, {0x1312, 0x1315}, + {0x1318, 0x135A}, {0x135D, 0x135F}, {0x1360, 0x1368}, + {0x1369, 0x137C}, {0x1380, 0x138F}, {0x1390, 0x1399}, + {0x13A0, 0x13F5}, {0x13F8, 0x13FD}, {0x1400, 0x1400}, + {0x1401, 0x166C}, {0x166D, 0x166E}, {0x166F, 0x167F}, + {0x1680, 0x1680}, {0x1681, 0x169A}, {0x169B, 0x169B}, + {0x169C, 0x169C}, {0x16A0, 0x16EA}, {0x16EB, 0x16ED}, + {0x16EE, 0x16F0}, {0x16F1, 0x16F8}, {0x1700, 0x170C}, + {0x170E, 0x1711}, {0x1712, 0x1714}, {0x1720, 0x1731}, + {0x1732, 0x1734}, {0x1735, 0x1736}, {0x1740, 0x1751}, + {0x1752, 0x1753}, {0x1760, 0x176C}, {0x176E, 0x1770}, + {0x1772, 0x1773}, {0x1780, 0x17B3}, {0x17B4, 0x17B5}, + {0x17B6, 0x17B6}, {0x17B7, 0x17BD}, {0x17BE, 0x17C5}, + {0x17C6, 0x17C6}, {0x17C7, 0x17C8}, {0x17C9, 0x17D3}, + {0x17D4, 0x17D6}, {0x17D7, 0x17D7}, {0x17D8, 0x17DA}, + {0x17DB, 0x17DB}, {0x17DC, 0x17DC}, {0x17DD, 0x17DD}, + {0x17E0, 0x17E9}, {0x17F0, 0x17F9}, {0x1800, 0x1805}, + {0x1806, 0x1806}, {0x1807, 0x180A}, {0x180B, 0x180D}, + {0x180E, 0x180E}, {0x1810, 0x1819}, {0x1820, 0x1842}, + {0x1843, 0x1843}, {0x1844, 0x1877}, {0x1880, 0x1884}, + {0x1885, 0x1886}, {0x1887, 0x18A8}, {0x18A9, 0x18A9}, + {0x18AA, 0x18AA}, {0x18B0, 0x18F5}, {0x1900, 0x191E}, + {0x1920, 0x1922}, {0x1923, 0x1926}, {0x1927, 0x1928}, + {0x1929, 0x192B}, {0x1930, 0x1931}, {0x1932, 0x1932}, + {0x1933, 0x1938}, {0x1939, 0x193B}, {0x1940, 0x1940}, + {0x1944, 0x1945}, {0x1946, 0x194F}, {0x1950, 0x196D}, + {0x1970, 0x1974}, {0x1980, 0x19AB}, {0x19B0, 0x19C9}, + {0x19D0, 0x19D9}, {0x19DA, 0x19DA}, {0x19DE, 0x19DF}, + {0x19E0, 0x19FF}, {0x1A00, 0x1A16}, {0x1A17, 0x1A18}, + {0x1A19, 0x1A1A}, {0x1A1B, 0x1A1B}, {0x1A1E, 0x1A1F}, + {0x1A20, 0x1A54}, {0x1A55, 0x1A55}, {0x1A56, 0x1A56}, + {0x1A57, 0x1A57}, {0x1A58, 0x1A5E}, {0x1A60, 0x1A60}, + {0x1A61, 0x1A61}, {0x1A62, 0x1A62}, {0x1A63, 0x1A64}, + {0x1A65, 0x1A6C}, {0x1A6D, 0x1A72}, {0x1A73, 0x1A7C}, + {0x1A7F, 0x1A7F}, {0x1A80, 0x1A89}, {0x1A90, 0x1A99}, + {0x1AA0, 0x1AA6}, {0x1AA7, 0x1AA7}, {0x1AA8, 0x1AAD}, + {0x1AB0, 0x1ABD}, {0x1ABE, 0x1ABE}, {0x1B00, 0x1B03}, + {0x1B04, 0x1B04}, {0x1B05, 0x1B33}, {0x1B34, 0x1B34}, + {0x1B35, 0x1B35}, {0x1B36, 0x1B3A}, {0x1B3B, 0x1B3B}, + {0x1B3C, 0x1B3C}, {0x1B3D, 0x1B41}, {0x1B42, 0x1B42}, + {0x1B43, 0x1B44}, {0x1B45, 0x1B4B}, {0x1B50, 0x1B59}, + {0x1B5A, 0x1B60}, {0x1B61, 0x1B6A}, {0x1B6B, 0x1B73}, + {0x1B74, 0x1B7C}, {0x1B80, 0x1B81}, {0x1B82, 0x1B82}, + {0x1B83, 0x1BA0}, {0x1BA1, 0x1BA1}, {0x1BA2, 0x1BA5}, + {0x1BA6, 0x1BA7}, {0x1BA8, 0x1BA9}, {0x1BAA, 0x1BAA}, + {0x1BAB, 0x1BAD}, {0x1BAE, 0x1BAF}, {0x1BB0, 0x1BB9}, + {0x1BBA, 0x1BBF}, {0x1BC0, 0x1BE5}, {0x1BE6, 0x1BE6}, + {0x1BE7, 0x1BE7}, {0x1BE8, 0x1BE9}, {0x1BEA, 0x1BEC}, + {0x1BED, 0x1BED}, {0x1BEE, 0x1BEE}, {0x1BEF, 0x1BF1}, + {0x1BF2, 0x1BF3}, {0x1BFC, 0x1BFF}, {0x1C00, 0x1C23}, + {0x1C24, 0x1C2B}, 
{0x1C2C, 0x1C33}, {0x1C34, 0x1C35}, + {0x1C36, 0x1C37}, {0x1C3B, 0x1C3F}, {0x1C40, 0x1C49}, + {0x1C4D, 0x1C4F}, {0x1C50, 0x1C59}, {0x1C5A, 0x1C77}, + {0x1C78, 0x1C7D}, {0x1C7E, 0x1C7F}, {0x1C80, 0x1C88}, + {0x1CC0, 0x1CC7}, {0x1CD0, 0x1CD2}, {0x1CD3, 0x1CD3}, + {0x1CD4, 0x1CE0}, {0x1CE1, 0x1CE1}, {0x1CE2, 0x1CE8}, + {0x1CE9, 0x1CEC}, {0x1CED, 0x1CED}, {0x1CEE, 0x1CF1}, + {0x1CF2, 0x1CF3}, {0x1CF4, 0x1CF4}, {0x1CF5, 0x1CF6}, + {0x1CF8, 0x1CF9}, {0x1D00, 0x1D2B}, {0x1D2C, 0x1D6A}, + {0x1D6B, 0x1D77}, {0x1D78, 0x1D78}, {0x1D79, 0x1D7F}, + {0x1D80, 0x1D9A}, {0x1D9B, 0x1DBF}, {0x1DC0, 0x1DF5}, + {0x1DFB, 0x1DFF}, {0x1E00, 0x1EFF}, {0x1F00, 0x1F15}, + {0x1F18, 0x1F1D}, {0x1F20, 0x1F45}, {0x1F48, 0x1F4D}, + {0x1F50, 0x1F57}, {0x1F59, 0x1F59}, {0x1F5B, 0x1F5B}, + {0x1F5D, 0x1F5D}, {0x1F5F, 0x1F7D}, {0x1F80, 0x1FB4}, + {0x1FB6, 0x1FBC}, {0x1FBD, 0x1FBD}, {0x1FBE, 0x1FBE}, + {0x1FBF, 0x1FC1}, {0x1FC2, 0x1FC4}, {0x1FC6, 0x1FCC}, + {0x1FCD, 0x1FCF}, {0x1FD0, 0x1FD3}, {0x1FD6, 0x1FDB}, + {0x1FDD, 0x1FDF}, {0x1FE0, 0x1FEC}, {0x1FED, 0x1FEF}, + {0x1FF2, 0x1FF4}, {0x1FF6, 0x1FFC}, {0x1FFD, 0x1FFE}, + {0x2000, 0x200A}, {0x200B, 0x200F}, {0x2011, 0x2012}, + {0x2017, 0x2017}, {0x201A, 0x201A}, {0x201B, 0x201B}, + {0x201E, 0x201E}, {0x201F, 0x201F}, {0x2023, 0x2023}, + {0x2028, 0x2028}, {0x2029, 0x2029}, {0x202A, 0x202E}, + {0x202F, 0x202F}, {0x2031, 0x2031}, {0x2034, 0x2034}, + {0x2036, 0x2038}, {0x2039, 0x2039}, {0x203A, 0x203A}, + {0x203C, 0x203D}, {0x203F, 0x2040}, {0x2041, 0x2043}, + {0x2044, 0x2044}, {0x2045, 0x2045}, {0x2046, 0x2046}, + {0x2047, 0x2051}, {0x2052, 0x2052}, {0x2053, 0x2053}, + {0x2054, 0x2054}, {0x2055, 0x205E}, {0x205F, 0x205F}, + {0x2060, 0x2064}, {0x2066, 0x206F}, {0x2070, 0x2070}, + {0x2071, 0x2071}, {0x2075, 0x2079}, {0x207A, 0x207C}, + {0x207D, 0x207D}, {0x207E, 0x207E}, {0x2080, 0x2080}, + {0x2085, 0x2089}, {0x208A, 0x208C}, {0x208D, 0x208D}, + {0x208E, 0x208E}, {0x2090, 0x209C}, {0x20A0, 0x20A8}, + {0x20AA, 0x20AB}, {0x20AD, 0x20BE}, {0x20D0, 0x20DC}, + {0x20DD, 0x20E0}, {0x20E1, 0x20E1}, {0x20E2, 0x20E4}, + {0x20E5, 0x20F0}, {0x2100, 0x2101}, {0x2102, 0x2102}, + {0x2104, 0x2104}, {0x2106, 0x2106}, {0x2107, 0x2107}, + {0x2108, 0x2108}, {0x210A, 0x2112}, {0x2114, 0x2114}, + {0x2115, 0x2115}, {0x2117, 0x2117}, {0x2118, 0x2118}, + {0x2119, 0x211D}, {0x211E, 0x2120}, {0x2123, 0x2123}, + {0x2124, 0x2124}, {0x2125, 0x2125}, {0x2127, 0x2127}, + {0x2128, 0x2128}, {0x2129, 0x2129}, {0x212A, 0x212A}, + {0x212C, 0x212D}, {0x212E, 0x212E}, {0x212F, 0x2134}, + {0x2135, 0x2138}, {0x2139, 0x2139}, {0x213A, 0x213B}, + {0x213C, 0x213F}, {0x2140, 0x2144}, {0x2145, 0x2149}, + {0x214A, 0x214A}, {0x214B, 0x214B}, {0x214C, 0x214D}, + {0x214E, 0x214E}, {0x214F, 0x214F}, {0x2150, 0x2152}, + {0x2155, 0x215A}, {0x215F, 0x215F}, {0x216C, 0x216F}, + {0x217A, 0x2182}, {0x2183, 0x2184}, {0x2185, 0x2188}, + {0x218A, 0x218B}, {0x219A, 0x219B}, {0x219C, 0x219F}, + {0x21A0, 0x21A0}, {0x21A1, 0x21A2}, {0x21A3, 0x21A3}, + {0x21A4, 0x21A5}, {0x21A6, 0x21A6}, {0x21A7, 0x21AD}, + {0x21AE, 0x21AE}, {0x21AF, 0x21B7}, {0x21BA, 0x21CD}, + {0x21CE, 0x21CF}, {0x21D0, 0x21D1}, {0x21D3, 0x21D3}, + {0x21D5, 0x21E6}, {0x21E8, 0x21F3}, {0x21F4, 0x21FF}, + {0x2201, 0x2201}, {0x2204, 0x2206}, {0x2209, 0x220A}, + {0x220C, 0x220E}, {0x2210, 0x2210}, {0x2212, 0x2214}, + {0x2216, 0x2219}, {0x221B, 0x221C}, {0x2221, 0x2222}, + {0x2224, 0x2224}, {0x2226, 0x2226}, {0x222D, 0x222D}, + {0x222F, 0x2233}, {0x2238, 0x223B}, {0x223E, 0x2247}, + {0x2249, 0x224B}, {0x224D, 0x2251}, {0x2253, 0x225F}, + {0x2262, 0x2263}, {0x2268, 0x2269}, {0x226C, 
0x226D}, + {0x2270, 0x2281}, {0x2284, 0x2285}, {0x2288, 0x2294}, + {0x2296, 0x2298}, {0x229A, 0x22A4}, {0x22A6, 0x22BE}, + {0x22C0, 0x22FF}, {0x2300, 0x2307}, {0x2308, 0x2308}, + {0x2309, 0x2309}, {0x230A, 0x230A}, {0x230B, 0x230B}, + {0x230C, 0x2311}, {0x2313, 0x2319}, {0x231C, 0x231F}, + {0x2320, 0x2321}, {0x2322, 0x2328}, {0x232B, 0x237B}, + {0x237C, 0x237C}, {0x237D, 0x239A}, {0x239B, 0x23B3}, + {0x23B4, 0x23DB}, {0x23DC, 0x23E1}, {0x23E2, 0x23E8}, + {0x23ED, 0x23EF}, {0x23F1, 0x23F2}, {0x23F4, 0x23FE}, + {0x2400, 0x2426}, {0x2440, 0x244A}, {0x24EA, 0x24EA}, + {0x254C, 0x254F}, {0x2574, 0x257F}, {0x2590, 0x2591}, + {0x2596, 0x259F}, {0x25A2, 0x25A2}, {0x25AA, 0x25B1}, + {0x25B4, 0x25B5}, {0x25B8, 0x25BB}, {0x25BE, 0x25BF}, + {0x25C2, 0x25C5}, {0x25C9, 0x25CA}, {0x25CC, 0x25CD}, + {0x25D2, 0x25E1}, {0x25E6, 0x25EE}, {0x25F0, 0x25F7}, + {0x25F8, 0x25FC}, {0x25FF, 0x25FF}, {0x2600, 0x2604}, + {0x2607, 0x2608}, {0x260A, 0x260D}, {0x2610, 0x2613}, + {0x2616, 0x261B}, {0x261D, 0x261D}, {0x261F, 0x263F}, + {0x2641, 0x2641}, {0x2643, 0x2647}, {0x2654, 0x265F}, + {0x2662, 0x2662}, {0x2666, 0x2666}, {0x266B, 0x266B}, + {0x266E, 0x266E}, {0x2670, 0x267E}, {0x2680, 0x2692}, + {0x2694, 0x269D}, {0x26A0, 0x26A0}, {0x26A2, 0x26A9}, + {0x26AC, 0x26BC}, {0x26C0, 0x26C3}, {0x26E2, 0x26E2}, + {0x26E4, 0x26E7}, {0x2700, 0x2704}, {0x2706, 0x2709}, + {0x270C, 0x2727}, {0x2729, 0x273C}, {0x273E, 0x274B}, + {0x274D, 0x274D}, {0x274F, 0x2752}, {0x2756, 0x2756}, + {0x2758, 0x2767}, {0x2768, 0x2768}, {0x2769, 0x2769}, + {0x276A, 0x276A}, {0x276B, 0x276B}, {0x276C, 0x276C}, + {0x276D, 0x276D}, {0x276E, 0x276E}, {0x276F, 0x276F}, + {0x2770, 0x2770}, {0x2771, 0x2771}, {0x2772, 0x2772}, + {0x2773, 0x2773}, {0x2774, 0x2774}, {0x2775, 0x2775}, + {0x2780, 0x2793}, {0x2794, 0x2794}, {0x2798, 0x27AF}, + {0x27B1, 0x27BE}, {0x27C0, 0x27C4}, {0x27C5, 0x27C5}, + {0x27C6, 0x27C6}, {0x27C7, 0x27E5}, {0x27EE, 0x27EE}, + {0x27EF, 0x27EF}, {0x27F0, 0x27FF}, {0x2800, 0x28FF}, + {0x2900, 0x297F}, {0x2980, 0x2982}, {0x2983, 0x2983}, + {0x2984, 0x2984}, {0x2987, 0x2987}, {0x2988, 0x2988}, + {0x2989, 0x2989}, {0x298A, 0x298A}, {0x298B, 0x298B}, + {0x298C, 0x298C}, {0x298D, 0x298D}, {0x298E, 0x298E}, + {0x298F, 0x298F}, {0x2990, 0x2990}, {0x2991, 0x2991}, + {0x2992, 0x2992}, {0x2993, 0x2993}, {0x2994, 0x2994}, + {0x2995, 0x2995}, {0x2996, 0x2996}, {0x2997, 0x2997}, + {0x2998, 0x2998}, {0x2999, 0x29D7}, {0x29D8, 0x29D8}, + {0x29D9, 0x29D9}, {0x29DA, 0x29DA}, {0x29DB, 0x29DB}, + {0x29DC, 0x29FB}, {0x29FC, 0x29FC}, {0x29FD, 0x29FD}, + {0x29FE, 0x29FF}, {0x2A00, 0x2AFF}, {0x2B00, 0x2B1A}, + {0x2B1D, 0x2B2F}, {0x2B30, 0x2B44}, {0x2B45, 0x2B46}, + {0x2B47, 0x2B4C}, {0x2B4D, 0x2B4F}, {0x2B51, 0x2B54}, + {0x2B5A, 0x2B73}, {0x2B76, 0x2B95}, {0x2B98, 0x2BB9}, + {0x2BBD, 0x2BC8}, {0x2BCA, 0x2BD1}, {0x2BEC, 0x2BEF}, + {0x2C00, 0x2C2E}, {0x2C30, 0x2C5E}, {0x2C60, 0x2C7B}, + {0x2C7C, 0x2C7D}, {0x2C7E, 0x2C7F}, {0x2C80, 0x2CE4}, + {0x2CE5, 0x2CEA}, {0x2CEB, 0x2CEE}, {0x2CEF, 0x2CF1}, + {0x2CF2, 0x2CF3}, {0x2CF9, 0x2CFC}, {0x2CFD, 0x2CFD}, + {0x2CFE, 0x2CFF}, {0x2D00, 0x2D25}, {0x2D27, 0x2D27}, + {0x2D2D, 0x2D2D}, {0x2D30, 0x2D67}, {0x2D6F, 0x2D6F}, + {0x2D70, 0x2D70}, {0x2D7F, 0x2D7F}, {0x2D80, 0x2D96}, + {0x2DA0, 0x2DA6}, {0x2DA8, 0x2DAE}, {0x2DB0, 0x2DB6}, + {0x2DB8, 0x2DBE}, {0x2DC0, 0x2DC6}, {0x2DC8, 0x2DCE}, + {0x2DD0, 0x2DD6}, {0x2DD8, 0x2DDE}, {0x2DE0, 0x2DFF}, + {0x2E00, 0x2E01}, {0x2E02, 0x2E02}, {0x2E03, 0x2E03}, + {0x2E04, 0x2E04}, {0x2E05, 0x2E05}, {0x2E06, 0x2E08}, + {0x2E09, 0x2E09}, {0x2E0A, 0x2E0A}, {0x2E0B, 0x2E0B}, + {0x2E0C, 
0x2E0C}, {0x2E0D, 0x2E0D}, {0x2E0E, 0x2E16}, + {0x2E17, 0x2E17}, {0x2E18, 0x2E19}, {0x2E1A, 0x2E1A}, + {0x2E1B, 0x2E1B}, {0x2E1C, 0x2E1C}, {0x2E1D, 0x2E1D}, + {0x2E1E, 0x2E1F}, {0x2E20, 0x2E20}, {0x2E21, 0x2E21}, + {0x2E22, 0x2E22}, {0x2E23, 0x2E23}, {0x2E24, 0x2E24}, + {0x2E25, 0x2E25}, {0x2E26, 0x2E26}, {0x2E27, 0x2E27}, + {0x2E28, 0x2E28}, {0x2E29, 0x2E29}, {0x2E2A, 0x2E2E}, + {0x2E2F, 0x2E2F}, {0x2E30, 0x2E39}, {0x2E3A, 0x2E3B}, + {0x2E3C, 0x2E3F}, {0x2E40, 0x2E40}, {0x2E41, 0x2E41}, + {0x2E42, 0x2E42}, {0x2E43, 0x2E44}, {0x303F, 0x303F}, + {0x4DC0, 0x4DFF}, {0xA4D0, 0xA4F7}, {0xA4F8, 0xA4FD}, + {0xA4FE, 0xA4FF}, {0xA500, 0xA60B}, {0xA60C, 0xA60C}, + {0xA60D, 0xA60F}, {0xA610, 0xA61F}, {0xA620, 0xA629}, + {0xA62A, 0xA62B}, {0xA640, 0xA66D}, {0xA66E, 0xA66E}, + {0xA66F, 0xA66F}, {0xA670, 0xA672}, {0xA673, 0xA673}, + {0xA674, 0xA67D}, {0xA67E, 0xA67E}, {0xA67F, 0xA67F}, + {0xA680, 0xA69B}, {0xA69C, 0xA69D}, {0xA69E, 0xA69F}, + {0xA6A0, 0xA6E5}, {0xA6E6, 0xA6EF}, {0xA6F0, 0xA6F1}, + {0xA6F2, 0xA6F7}, {0xA700, 0xA716}, {0xA717, 0xA71F}, + {0xA720, 0xA721}, {0xA722, 0xA76F}, {0xA770, 0xA770}, + {0xA771, 0xA787}, {0xA788, 0xA788}, {0xA789, 0xA78A}, + {0xA78B, 0xA78E}, {0xA78F, 0xA78F}, {0xA790, 0xA7AE}, + {0xA7B0, 0xA7B7}, {0xA7F7, 0xA7F7}, {0xA7F8, 0xA7F9}, + {0xA7FA, 0xA7FA}, {0xA7FB, 0xA7FF}, {0xA800, 0xA801}, + {0xA802, 0xA802}, {0xA803, 0xA805}, {0xA806, 0xA806}, + {0xA807, 0xA80A}, {0xA80B, 0xA80B}, {0xA80C, 0xA822}, + {0xA823, 0xA824}, {0xA825, 0xA826}, {0xA827, 0xA827}, + {0xA828, 0xA82B}, {0xA830, 0xA835}, {0xA836, 0xA837}, + {0xA838, 0xA838}, {0xA839, 0xA839}, {0xA840, 0xA873}, + {0xA874, 0xA877}, {0xA880, 0xA881}, {0xA882, 0xA8B3}, + {0xA8B4, 0xA8C3}, {0xA8C4, 0xA8C5}, {0xA8CE, 0xA8CF}, + {0xA8D0, 0xA8D9}, {0xA8E0, 0xA8F1}, {0xA8F2, 0xA8F7}, + {0xA8F8, 0xA8FA}, {0xA8FB, 0xA8FB}, {0xA8FC, 0xA8FC}, + {0xA8FD, 0xA8FD}, {0xA900, 0xA909}, {0xA90A, 0xA925}, + {0xA926, 0xA92D}, {0xA92E, 0xA92F}, {0xA930, 0xA946}, + {0xA947, 0xA951}, {0xA952, 0xA953}, {0xA95F, 0xA95F}, + {0xA980, 0xA982}, {0xA983, 0xA983}, {0xA984, 0xA9B2}, + {0xA9B3, 0xA9B3}, {0xA9B4, 0xA9B5}, {0xA9B6, 0xA9B9}, + {0xA9BA, 0xA9BB}, {0xA9BC, 0xA9BC}, {0xA9BD, 0xA9C0}, + {0xA9C1, 0xA9CD}, {0xA9CF, 0xA9CF}, {0xA9D0, 0xA9D9}, + {0xA9DE, 0xA9DF}, {0xA9E0, 0xA9E4}, {0xA9E5, 0xA9E5}, + {0xA9E6, 0xA9E6}, {0xA9E7, 0xA9EF}, {0xA9F0, 0xA9F9}, + {0xA9FA, 0xA9FE}, {0xAA00, 0xAA28}, {0xAA29, 0xAA2E}, + {0xAA2F, 0xAA30}, {0xAA31, 0xAA32}, {0xAA33, 0xAA34}, + {0xAA35, 0xAA36}, {0xAA40, 0xAA42}, {0xAA43, 0xAA43}, + {0xAA44, 0xAA4B}, {0xAA4C, 0xAA4C}, {0xAA4D, 0xAA4D}, + {0xAA50, 0xAA59}, {0xAA5C, 0xAA5F}, {0xAA60, 0xAA6F}, + {0xAA70, 0xAA70}, {0xAA71, 0xAA76}, {0xAA77, 0xAA79}, + {0xAA7A, 0xAA7A}, {0xAA7B, 0xAA7B}, {0xAA7C, 0xAA7C}, + {0xAA7D, 0xAA7D}, {0xAA7E, 0xAA7F}, {0xAA80, 0xAAAF}, + {0xAAB0, 0xAAB0}, {0xAAB1, 0xAAB1}, {0xAAB2, 0xAAB4}, + {0xAAB5, 0xAAB6}, {0xAAB7, 0xAAB8}, {0xAAB9, 0xAABD}, + {0xAABE, 0xAABF}, {0xAAC0, 0xAAC0}, {0xAAC1, 0xAAC1}, + {0xAAC2, 0xAAC2}, {0xAADB, 0xAADC}, {0xAADD, 0xAADD}, + {0xAADE, 0xAADF}, {0xAAE0, 0xAAEA}, {0xAAEB, 0xAAEB}, + {0xAAEC, 0xAAED}, {0xAAEE, 0xAAEF}, {0xAAF0, 0xAAF1}, + {0xAAF2, 0xAAF2}, {0xAAF3, 0xAAF4}, {0xAAF5, 0xAAF5}, + {0xAAF6, 0xAAF6}, {0xAB01, 0xAB06}, {0xAB09, 0xAB0E}, + {0xAB11, 0xAB16}, {0xAB20, 0xAB26}, {0xAB28, 0xAB2E}, + {0xAB30, 0xAB5A}, {0xAB5B, 0xAB5B}, {0xAB5C, 0xAB5F}, + {0xAB60, 0xAB65}, {0xAB70, 0xABBF}, {0xABC0, 0xABE2}, + {0xABE3, 0xABE4}, {0xABE5, 0xABE5}, {0xABE6, 0xABE7}, + {0xABE8, 0xABE8}, {0xABE9, 0xABEA}, {0xABEB, 0xABEB}, + {0xABEC, 0xABEC}, {0xABED, 0xABED}, 
{0xABF0, 0xABF9}, + {0xD7B0, 0xD7C6}, {0xD7CB, 0xD7FB}, {0xD800, 0xDB7F}, + {0xDB80, 0xDBFF}, {0xDC00, 0xDFFF}, {0xFB00, 0xFB06}, + {0xFB13, 0xFB17}, {0xFB1D, 0xFB1D}, {0xFB1E, 0xFB1E}, + {0xFB1F, 0xFB28}, {0xFB29, 0xFB29}, {0xFB2A, 0xFB36}, + {0xFB38, 0xFB3C}, {0xFB3E, 0xFB3E}, {0xFB40, 0xFB41}, + {0xFB43, 0xFB44}, {0xFB46, 0xFB4F}, {0xFB50, 0xFBB1}, + {0xFBB2, 0xFBC1}, {0xFBD3, 0xFD3D}, {0xFD3E, 0xFD3E}, + {0xFD3F, 0xFD3F}, {0xFD50, 0xFD8F}, {0xFD92, 0xFDC7}, + {0xFDF0, 0xFDFB}, {0xFDFC, 0xFDFC}, {0xFDFD, 0xFDFD}, + {0xFE20, 0xFE2F}, {0xFE70, 0xFE74}, {0xFE76, 0xFEFC}, + {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFB}, {0xFFFC, 0xFFFC}, + {0x10000, 0x1000B}, {0x1000D, 0x10026}, {0x10028, 0x1003A}, + {0x1003C, 0x1003D}, {0x1003F, 0x1004D}, {0x10050, 0x1005D}, + {0x10080, 0x100FA}, {0x10100, 0x10102}, {0x10107, 0x10133}, + {0x10137, 0x1013F}, {0x10140, 0x10174}, {0x10175, 0x10178}, + {0x10179, 0x10189}, {0x1018A, 0x1018B}, {0x1018C, 0x1018E}, + {0x10190, 0x1019B}, {0x101A0, 0x101A0}, {0x101D0, 0x101FC}, + {0x101FD, 0x101FD}, {0x10280, 0x1029C}, {0x102A0, 0x102D0}, + {0x102E0, 0x102E0}, {0x102E1, 0x102FB}, {0x10300, 0x1031F}, + {0x10320, 0x10323}, {0x10330, 0x10340}, {0x10341, 0x10341}, + {0x10342, 0x10349}, {0x1034A, 0x1034A}, {0x10350, 0x10375}, + {0x10376, 0x1037A}, {0x10380, 0x1039D}, {0x1039F, 0x1039F}, + {0x103A0, 0x103C3}, {0x103C8, 0x103CF}, {0x103D0, 0x103D0}, + {0x103D1, 0x103D5}, {0x10400, 0x1044F}, {0x10450, 0x1047F}, + {0x10480, 0x1049D}, {0x104A0, 0x104A9}, {0x104B0, 0x104D3}, + {0x104D8, 0x104FB}, {0x10500, 0x10527}, {0x10530, 0x10563}, + {0x1056F, 0x1056F}, {0x10600, 0x10736}, {0x10740, 0x10755}, + {0x10760, 0x10767}, {0x10800, 0x10805}, {0x10808, 0x10808}, + {0x1080A, 0x10835}, {0x10837, 0x10838}, {0x1083C, 0x1083C}, + {0x1083F, 0x1083F}, {0x10840, 0x10855}, {0x10857, 0x10857}, + {0x10858, 0x1085F}, {0x10860, 0x10876}, {0x10877, 0x10878}, + {0x10879, 0x1087F}, {0x10880, 0x1089E}, {0x108A7, 0x108AF}, + {0x108E0, 0x108F2}, {0x108F4, 0x108F5}, {0x108FB, 0x108FF}, + {0x10900, 0x10915}, {0x10916, 0x1091B}, {0x1091F, 0x1091F}, + {0x10920, 0x10939}, {0x1093F, 0x1093F}, {0x10980, 0x1099F}, + {0x109A0, 0x109B7}, {0x109BC, 0x109BD}, {0x109BE, 0x109BF}, + {0x109C0, 0x109CF}, {0x109D2, 0x109FF}, {0x10A00, 0x10A00}, + {0x10A01, 0x10A03}, {0x10A05, 0x10A06}, {0x10A0C, 0x10A0F}, + {0x10A10, 0x10A13}, {0x10A15, 0x10A17}, {0x10A19, 0x10A33}, + {0x10A38, 0x10A3A}, {0x10A3F, 0x10A3F}, {0x10A40, 0x10A47}, + {0x10A50, 0x10A58}, {0x10A60, 0x10A7C}, {0x10A7D, 0x10A7E}, + {0x10A7F, 0x10A7F}, {0x10A80, 0x10A9C}, {0x10A9D, 0x10A9F}, + {0x10AC0, 0x10AC7}, {0x10AC8, 0x10AC8}, {0x10AC9, 0x10AE4}, + {0x10AE5, 0x10AE6}, {0x10AEB, 0x10AEF}, {0x10AF0, 0x10AF6}, + {0x10B00, 0x10B35}, {0x10B39, 0x10B3F}, {0x10B40, 0x10B55}, + {0x10B58, 0x10B5F}, {0x10B60, 0x10B72}, {0x10B78, 0x10B7F}, + {0x10B80, 0x10B91}, {0x10B99, 0x10B9C}, {0x10BA9, 0x10BAF}, + {0x10C00, 0x10C48}, {0x10C80, 0x10CB2}, {0x10CC0, 0x10CF2}, + {0x10CFA, 0x10CFF}, {0x10E60, 0x10E7E}, {0x11000, 0x11000}, + {0x11001, 0x11001}, {0x11002, 0x11002}, {0x11003, 0x11037}, + {0x11038, 0x11046}, {0x11047, 0x1104D}, {0x11052, 0x11065}, + {0x11066, 0x1106F}, {0x1107F, 0x1107F}, {0x11080, 0x11081}, + {0x11082, 0x11082}, {0x11083, 0x110AF}, {0x110B0, 0x110B2}, + {0x110B3, 0x110B6}, {0x110B7, 0x110B8}, {0x110B9, 0x110BA}, + {0x110BB, 0x110BC}, {0x110BD, 0x110BD}, {0x110BE, 0x110C1}, + {0x110D0, 0x110E8}, {0x110F0, 0x110F9}, {0x11100, 0x11102}, + {0x11103, 0x11126}, {0x11127, 0x1112B}, {0x1112C, 0x1112C}, + {0x1112D, 0x11134}, {0x11136, 0x1113F}, {0x11140, 0x11143}, + 
{0x11150, 0x11172}, {0x11173, 0x11173}, {0x11174, 0x11175}, + {0x11176, 0x11176}, {0x11180, 0x11181}, {0x11182, 0x11182}, + {0x11183, 0x111B2}, {0x111B3, 0x111B5}, {0x111B6, 0x111BE}, + {0x111BF, 0x111C0}, {0x111C1, 0x111C4}, {0x111C5, 0x111C9}, + {0x111CA, 0x111CC}, {0x111CD, 0x111CD}, {0x111D0, 0x111D9}, + {0x111DA, 0x111DA}, {0x111DB, 0x111DB}, {0x111DC, 0x111DC}, + {0x111DD, 0x111DF}, {0x111E1, 0x111F4}, {0x11200, 0x11211}, + {0x11213, 0x1122B}, {0x1122C, 0x1122E}, {0x1122F, 0x11231}, + {0x11232, 0x11233}, {0x11234, 0x11234}, {0x11235, 0x11235}, + {0x11236, 0x11237}, {0x11238, 0x1123D}, {0x1123E, 0x1123E}, + {0x11280, 0x11286}, {0x11288, 0x11288}, {0x1128A, 0x1128D}, + {0x1128F, 0x1129D}, {0x1129F, 0x112A8}, {0x112A9, 0x112A9}, + {0x112B0, 0x112DE}, {0x112DF, 0x112DF}, {0x112E0, 0x112E2}, + {0x112E3, 0x112EA}, {0x112F0, 0x112F9}, {0x11300, 0x11301}, + {0x11302, 0x11303}, {0x11305, 0x1130C}, {0x1130F, 0x11310}, + {0x11313, 0x11328}, {0x1132A, 0x11330}, {0x11332, 0x11333}, + {0x11335, 0x11339}, {0x1133C, 0x1133C}, {0x1133D, 0x1133D}, + {0x1133E, 0x1133F}, {0x11340, 0x11340}, {0x11341, 0x11344}, + {0x11347, 0x11348}, {0x1134B, 0x1134D}, {0x11350, 0x11350}, + {0x11357, 0x11357}, {0x1135D, 0x11361}, {0x11362, 0x11363}, + {0x11366, 0x1136C}, {0x11370, 0x11374}, {0x11400, 0x11434}, + {0x11435, 0x11437}, {0x11438, 0x1143F}, {0x11440, 0x11441}, + {0x11442, 0x11444}, {0x11445, 0x11445}, {0x11446, 0x11446}, + {0x11447, 0x1144A}, {0x1144B, 0x1144F}, {0x11450, 0x11459}, + {0x1145B, 0x1145B}, {0x1145D, 0x1145D}, {0x11480, 0x114AF}, + {0x114B0, 0x114B2}, {0x114B3, 0x114B8}, {0x114B9, 0x114B9}, + {0x114BA, 0x114BA}, {0x114BB, 0x114BE}, {0x114BF, 0x114C0}, + {0x114C1, 0x114C1}, {0x114C2, 0x114C3}, {0x114C4, 0x114C5}, + {0x114C6, 0x114C6}, {0x114C7, 0x114C7}, {0x114D0, 0x114D9}, + {0x11580, 0x115AE}, {0x115AF, 0x115B1}, {0x115B2, 0x115B5}, + {0x115B8, 0x115BB}, {0x115BC, 0x115BD}, {0x115BE, 0x115BE}, + {0x115BF, 0x115C0}, {0x115C1, 0x115D7}, {0x115D8, 0x115DB}, + {0x115DC, 0x115DD}, {0x11600, 0x1162F}, {0x11630, 0x11632}, + {0x11633, 0x1163A}, {0x1163B, 0x1163C}, {0x1163D, 0x1163D}, + {0x1163E, 0x1163E}, {0x1163F, 0x11640}, {0x11641, 0x11643}, + {0x11644, 0x11644}, {0x11650, 0x11659}, {0x11660, 0x1166C}, + {0x11680, 0x116AA}, {0x116AB, 0x116AB}, {0x116AC, 0x116AC}, + {0x116AD, 0x116AD}, {0x116AE, 0x116AF}, {0x116B0, 0x116B5}, + {0x116B6, 0x116B6}, {0x116B7, 0x116B7}, {0x116C0, 0x116C9}, + {0x11700, 0x11719}, {0x1171D, 0x1171F}, {0x11720, 0x11721}, + {0x11722, 0x11725}, {0x11726, 0x11726}, {0x11727, 0x1172B}, + {0x11730, 0x11739}, {0x1173A, 0x1173B}, {0x1173C, 0x1173E}, + {0x1173F, 0x1173F}, {0x118A0, 0x118DF}, {0x118E0, 0x118E9}, + {0x118EA, 0x118F2}, {0x118FF, 0x118FF}, {0x11AC0, 0x11AF8}, + {0x11C00, 0x11C08}, {0x11C0A, 0x11C2E}, {0x11C2F, 0x11C2F}, + {0x11C30, 0x11C36}, {0x11C38, 0x11C3D}, {0x11C3E, 0x11C3E}, + {0x11C3F, 0x11C3F}, {0x11C40, 0x11C40}, {0x11C41, 0x11C45}, + {0x11C50, 0x11C59}, {0x11C5A, 0x11C6C}, {0x11C70, 0x11C71}, + {0x11C72, 0x11C8F}, {0x11C92, 0x11CA7}, {0x11CA9, 0x11CA9}, + {0x11CAA, 0x11CB0}, {0x11CB1, 0x11CB1}, {0x11CB2, 0x11CB3}, + {0x11CB4, 0x11CB4}, {0x11CB5, 0x11CB6}, {0x12000, 0x12399}, + {0x12400, 0x1246E}, {0x12470, 0x12474}, {0x12480, 0x12543}, + {0x13000, 0x1342E}, {0x14400, 0x14646}, {0x16800, 0x16A38}, + {0x16A40, 0x16A5E}, {0x16A60, 0x16A69}, {0x16A6E, 0x16A6F}, + {0x16AD0, 0x16AED}, {0x16AF0, 0x16AF4}, {0x16AF5, 0x16AF5}, + {0x16B00, 0x16B2F}, {0x16B30, 0x16B36}, {0x16B37, 0x16B3B}, + {0x16B3C, 0x16B3F}, {0x16B40, 0x16B43}, {0x16B44, 0x16B44}, + {0x16B45, 0x16B45}, 
{0x16B50, 0x16B59}, {0x16B5B, 0x16B61}, + {0x16B63, 0x16B77}, {0x16B7D, 0x16B8F}, {0x16F00, 0x16F44}, + {0x16F50, 0x16F50}, {0x16F51, 0x16F7E}, {0x16F8F, 0x16F92}, + {0x16F93, 0x16F9F}, {0x1BC00, 0x1BC6A}, {0x1BC70, 0x1BC7C}, + {0x1BC80, 0x1BC88}, {0x1BC90, 0x1BC99}, {0x1BC9C, 0x1BC9C}, + {0x1BC9D, 0x1BC9E}, {0x1BC9F, 0x1BC9F}, {0x1BCA0, 0x1BCA3}, + {0x1D000, 0x1D0F5}, {0x1D100, 0x1D126}, {0x1D129, 0x1D164}, + {0x1D165, 0x1D166}, {0x1D167, 0x1D169}, {0x1D16A, 0x1D16C}, + {0x1D16D, 0x1D172}, {0x1D173, 0x1D17A}, {0x1D17B, 0x1D182}, + {0x1D183, 0x1D184}, {0x1D185, 0x1D18B}, {0x1D18C, 0x1D1A9}, + {0x1D1AA, 0x1D1AD}, {0x1D1AE, 0x1D1E8}, {0x1D200, 0x1D241}, + {0x1D242, 0x1D244}, {0x1D245, 0x1D245}, {0x1D300, 0x1D356}, + {0x1D360, 0x1D371}, {0x1D400, 0x1D454}, {0x1D456, 0x1D49C}, + {0x1D49E, 0x1D49F}, {0x1D4A2, 0x1D4A2}, {0x1D4A5, 0x1D4A6}, + {0x1D4A9, 0x1D4AC}, {0x1D4AE, 0x1D4B9}, {0x1D4BB, 0x1D4BB}, + {0x1D4BD, 0x1D4C3}, {0x1D4C5, 0x1D505}, {0x1D507, 0x1D50A}, + {0x1D50D, 0x1D514}, {0x1D516, 0x1D51C}, {0x1D51E, 0x1D539}, + {0x1D53B, 0x1D53E}, {0x1D540, 0x1D544}, {0x1D546, 0x1D546}, + {0x1D54A, 0x1D550}, {0x1D552, 0x1D6A5}, {0x1D6A8, 0x1D6C0}, + {0x1D6C1, 0x1D6C1}, {0x1D6C2, 0x1D6DA}, {0x1D6DB, 0x1D6DB}, + {0x1D6DC, 0x1D6FA}, {0x1D6FB, 0x1D6FB}, {0x1D6FC, 0x1D714}, + {0x1D715, 0x1D715}, {0x1D716, 0x1D734}, {0x1D735, 0x1D735}, + {0x1D736, 0x1D74E}, {0x1D74F, 0x1D74F}, {0x1D750, 0x1D76E}, + {0x1D76F, 0x1D76F}, {0x1D770, 0x1D788}, {0x1D789, 0x1D789}, + {0x1D78A, 0x1D7A8}, {0x1D7A9, 0x1D7A9}, {0x1D7AA, 0x1D7C2}, + {0x1D7C3, 0x1D7C3}, {0x1D7C4, 0x1D7CB}, {0x1D7CE, 0x1D7FF}, + {0x1D800, 0x1D9FF}, {0x1DA00, 0x1DA36}, {0x1DA37, 0x1DA3A}, + {0x1DA3B, 0x1DA6C}, {0x1DA6D, 0x1DA74}, {0x1DA75, 0x1DA75}, + {0x1DA76, 0x1DA83}, {0x1DA84, 0x1DA84}, {0x1DA85, 0x1DA86}, + {0x1DA87, 0x1DA8B}, {0x1DA9B, 0x1DA9F}, {0x1DAA1, 0x1DAAF}, + {0x1E000, 0x1E006}, {0x1E008, 0x1E018}, {0x1E01B, 0x1E021}, + {0x1E023, 0x1E024}, {0x1E026, 0x1E02A}, {0x1E800, 0x1E8C4}, + {0x1E8C7, 0x1E8CF}, {0x1E8D0, 0x1E8D6}, {0x1E900, 0x1E943}, + {0x1E944, 0x1E94A}, {0x1E950, 0x1E959}, {0x1E95E, 0x1E95F}, + {0x1EE00, 0x1EE03}, {0x1EE05, 0x1EE1F}, {0x1EE21, 0x1EE22}, + {0x1EE24, 0x1EE24}, {0x1EE27, 0x1EE27}, {0x1EE29, 0x1EE32}, + {0x1EE34, 0x1EE37}, {0x1EE39, 0x1EE39}, {0x1EE3B, 0x1EE3B}, + {0x1EE42, 0x1EE42}, {0x1EE47, 0x1EE47}, {0x1EE49, 0x1EE49}, + {0x1EE4B, 0x1EE4B}, {0x1EE4D, 0x1EE4F}, {0x1EE51, 0x1EE52}, + {0x1EE54, 0x1EE54}, {0x1EE57, 0x1EE57}, {0x1EE59, 0x1EE59}, + {0x1EE5B, 0x1EE5B}, {0x1EE5D, 0x1EE5D}, {0x1EE5F, 0x1EE5F}, + {0x1EE61, 0x1EE62}, {0x1EE64, 0x1EE64}, {0x1EE67, 0x1EE6A}, + {0x1EE6C, 0x1EE72}, {0x1EE74, 0x1EE77}, {0x1EE79, 0x1EE7C}, + {0x1EE7E, 0x1EE7E}, {0x1EE80, 0x1EE89}, {0x1EE8B, 0x1EE9B}, + {0x1EEA1, 0x1EEA3}, {0x1EEA5, 0x1EEA9}, {0x1EEAB, 0x1EEBB}, + {0x1EEF0, 0x1EEF1}, {0x1F000, 0x1F003}, {0x1F005, 0x1F02B}, + {0x1F030, 0x1F093}, {0x1F0A0, 0x1F0AE}, {0x1F0B1, 0x1F0BF}, + {0x1F0C1, 0x1F0CE}, {0x1F0D1, 0x1F0F5}, {0x1F10B, 0x1F10C}, + {0x1F12E, 0x1F12E}, {0x1F16A, 0x1F16B}, {0x1F1E6, 0x1F1FF}, + {0x1F321, 0x1F32C}, {0x1F336, 0x1F336}, {0x1F37D, 0x1F37D}, + {0x1F394, 0x1F39F}, {0x1F3CB, 0x1F3CE}, {0x1F3D4, 0x1F3DF}, + {0x1F3F1, 0x1F3F3}, {0x1F3F5, 0x1F3F7}, {0x1F43F, 0x1F43F}, + {0x1F441, 0x1F441}, {0x1F4FD, 0x1F4FE}, {0x1F53E, 0x1F54A}, + {0x1F54F, 0x1F54F}, {0x1F568, 0x1F579}, {0x1F57B, 0x1F594}, + {0x1F597, 0x1F5A3}, {0x1F5A5, 0x1F5FA}, {0x1F650, 0x1F67F}, + {0x1F6C6, 0x1F6CB}, {0x1F6CD, 0x1F6CF}, {0x1F6E0, 0x1F6EA}, + {0x1F6F0, 0x1F6F3}, {0x1F700, 0x1F773}, {0x1F780, 0x1F7D4}, + {0x1F800, 0x1F80B}, {0x1F810, 0x1F847}, 
{0x1F850, 0x1F859}, + {0x1F860, 0x1F887}, {0x1F890, 0x1F8AD}, {0xE0001, 0xE0001}, + {0xE0020, 0xE007F}, +} + +// Condition have flag EastAsianWidth whether the current locale is CJK or not. +type Condition struct { + EastAsianWidth bool +} + +// NewCondition return new instance of Condition which is current locale. +func NewCondition() *Condition { + return &Condition{EastAsianWidth} +} + +// RuneWidth returns the number of cells in r. +// See http://www.unicode.org/reports/tr11/ +func (c *Condition) RuneWidth(r rune) int { + switch { + case r < 0 || r > 0x10FFFF || + inTables(r, nonprint, combining, notassigned): + return 0 + case (c.EastAsianWidth && IsAmbiguousWidth(r)) || + inTables(r, doublewidth, emoji): + return 2 + default: + return 1 + } +} + +// StringWidth return width as you can see +func (c *Condition) StringWidth(s string) (width int) { + for _, r := range []rune(s) { + width += c.RuneWidth(r) + } + return width +} + +// Truncate return string truncated with w cells +func (c *Condition) Truncate(s string, w int, tail string) string { + if c.StringWidth(s) <= w { + return s + } + r := []rune(s) + tw := c.StringWidth(tail) + w -= tw + width := 0 + i := 0 + for ; i < len(r); i++ { + cw := c.RuneWidth(r[i]) + if width+cw > w { + break + } + width += cw + } + return string(r[0:i]) + tail +} + +// Wrap return string wrapped with w cells +func (c *Condition) Wrap(s string, w int) string { + width := 0 + out := "" + for _, r := range []rune(s) { + cw := RuneWidth(r) + if r == '\n' { + out += string(r) + width = 0 + continue + } else if width+cw > w { + out += "\n" + width = 0 + out += string(r) + width += cw + continue + } + out += string(r) + width += cw + } + return out +} + +// FillLeft return string filled in left by spaces in w cells +func (c *Condition) FillLeft(s string, w int) string { + width := c.StringWidth(s) + count := w - width + if count > 0 { + b := make([]byte, count) + for i := range b { + b[i] = ' ' + } + return string(b) + s + } + return s +} + +// FillRight return string filled in left by spaces in w cells +func (c *Condition) FillRight(s string, w int) string { + width := c.StringWidth(s) + count := w - width + if count > 0 { + b := make([]byte, count) + for i := range b { + b[i] = ' ' + } + return s + string(b) + } + return s +} + +// RuneWidth returns the number of cells in r. +// See http://www.unicode.org/reports/tr11/ +func RuneWidth(r rune) int { + return DefaultCondition.RuneWidth(r) +} + +// IsAmbiguousWidth returns whether is ambiguous width or not. +func IsAmbiguousWidth(r rune) bool { + return inTables(r, private, ambiguous) +} + +// IsNeutralWidth returns whether is neutral width or not. 
+func IsNeutralWidth(r rune) bool { + return inTable(r, neutral) +} + +// StringWidth return width as you can see +func StringWidth(s string) (width int) { + return DefaultCondition.StringWidth(s) +} + +// Truncate return string truncated with w cells +func Truncate(s string, w int, tail string) string { + return DefaultCondition.Truncate(s, w, tail) +} + +// Wrap return string wrapped with w cells +func Wrap(s string, w int) string { + return DefaultCondition.Wrap(s, w) +} + +// FillLeft return string filled in left by spaces in w cells +func FillLeft(s string, w int) string { + return DefaultCondition.FillLeft(s, w) +} + +// FillRight return string filled in left by spaces in w cells +func FillRight(s string, w int) string { + return DefaultCondition.FillRight(s, w) +} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_js.go b/vendor/github.com/mattn/go-runewidth/runewidth_js.go new file mode 100644 index 0000000..0ce32c5 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/runewidth_js.go @@ -0,0 +1,8 @@ +// +build js + +package runewidth + +func IsEastAsian() bool { + // TODO: Implement this for the web. Detect east asian in a compatible way, and return true. + return false +} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_posix.go b/vendor/github.com/mattn/go-runewidth/runewidth_posix.go new file mode 100644 index 0000000..c579e9a --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/runewidth_posix.go @@ -0,0 +1,77 @@ +// +build !windows,!js + +package runewidth + +import ( + "os" + "regexp" + "strings" +) + +var reLoc = regexp.MustCompile(`^[a-z][a-z][a-z]?(?:_[A-Z][A-Z])?\.(.+)`) + +var mblenTable = map[string]int{ + "utf-8": 6, + "utf8": 6, + "jis": 8, + "eucjp": 3, + "euckr": 2, + "euccn": 2, + "sjis": 2, + "cp932": 2, + "cp51932": 2, + "cp936": 2, + "cp949": 2, + "cp950": 2, + "big5": 2, + "gbk": 2, + "gb2312": 2, +} + +func isEastAsian(locale string) bool { + charset := strings.ToLower(locale) + r := reLoc.FindStringSubmatch(locale) + if len(r) == 2 { + charset = strings.ToLower(r[1]) + } + + if strings.HasSuffix(charset, "@cjk_narrow") { + return false + } + + for pos, b := range []byte(charset) { + if b == '@' { + charset = charset[:pos] + break + } + } + max := 1 + if m, ok := mblenTable[charset]; ok { + max = m + } + if max > 1 && (charset[0] != 'u' || + strings.HasPrefix(locale, "ja") || + strings.HasPrefix(locale, "ko") || + strings.HasPrefix(locale, "zh")) { + return true + } + return false +} + +// IsEastAsian return true if the current locale is CJK +func IsEastAsian() bool { + locale := os.Getenv("LC_CTYPE") + if locale == "" { + locale = os.Getenv("LANG") + } + + // ignore C locale + if locale == "POSIX" || locale == "C" { + return false + } + if len(locale) > 1 && locale[0] == 'C' && (locale[1] == '.' 
|| locale[1] == '-') { + return false + } + + return isEastAsian(locale) +} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_windows.go b/vendor/github.com/mattn/go-runewidth/runewidth_windows.go new file mode 100644 index 0000000..0258876 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/runewidth_windows.go @@ -0,0 +1,25 @@ +package runewidth + +import ( + "syscall" +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32") + procGetConsoleOutputCP = kernel32.NewProc("GetConsoleOutputCP") +) + +// IsEastAsian return true if the current locale is CJK +func IsEastAsian() bool { + r1, _, _ := procGetConsoleOutputCP.Call() + if r1 == 0 { + return false + } + + switch int(r1) { + case 932, 51932, 936, 949, 950: + return true + } + + return false +} diff --git a/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md b/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md new file mode 100644 index 0000000..2298515 --- /dev/null +++ b/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-wordwrap/README.md b/vendor/github.com/mitchellh/go-wordwrap/README.md new file mode 100644 index 0000000..60ae311 --- /dev/null +++ b/vendor/github.com/mitchellh/go-wordwrap/README.md @@ -0,0 +1,39 @@ +# go-wordwrap + +`go-wordwrap` (Golang package: `wordwrap`) is a package for Go that +automatically wraps words into multiple lines. The primary use case for this +is in formatting CLI output, but of course word wrapping is a generally useful +thing to do. + +## Installation and Usage + +Install using `go get github.com/mitchellh/go-wordwrap`. + +Full documentation is available at +http://godoc.org/github.com/mitchellh/go-wordwrap + +Below is an example of its usage ignoring errors: + +```go +wrapped := wordwrap.WrapString("foo bar baz", 3) +fmt.Println(wrapped) +``` + +Would output: + +``` +foo +bar +baz +``` + +## Word Wrap Algorithm + +This library doesn't use any clever algorithm for word wrapping. The wrapping +is actually very naive: whenever there is whitespace or an explicit linebreak. +The goal of this library is for word wrapping CLI output, so the input is +typically pretty well controlled human language. Because of this, the naive +approach typically works just fine. + +In the future, we'd like to make the algorithm more advanced. We would do +so without breaking the API. 
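The go-wordwrap README above describes wrapping that happens only at whitespace and explicit linebreaks. A minimal, self-contained sketch of what that means in practice is below; the import path and package name come from the README, the file layout and sample strings are illustrative, and the long-word behaviour is the "pathological case" mentioned in the package doc comment in wordwrap.go further down.

```go
package main

import (
	"fmt"

	"github.com/mitchellh/go-wordwrap"
)

func main() {
	// Ordinary prose is broken at the spaces once a line would exceed the limit,
	// so this prints "foo", "bar" and "baz" on separate lines.
	fmt.Println(wordwrap.WrapString("foo bar baz", 6))

	// A single word longer than the limit is passed through unchanged,
	// because the naive algorithm never splits inside a word.
	fmt.Println(wordwrap.WrapString("unbreakable", 6))
}
```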
diff --git a/vendor/github.com/mitchellh/go-wordwrap/go.mod b/vendor/github.com/mitchellh/go-wordwrap/go.mod new file mode 100644 index 0000000..2ae411b --- /dev/null +++ b/vendor/github.com/mitchellh/go-wordwrap/go.mod @@ -0,0 +1 @@ +module github.com/mitchellh/go-wordwrap diff --git a/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go new file mode 100644 index 0000000..ac67205 --- /dev/null +++ b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go @@ -0,0 +1,73 @@ +package wordwrap + +import ( + "bytes" + "unicode" +) + +// WrapString wraps the given string within lim width in characters. +// +// Wrapping is currently naive and only happens at white-space. A future +// version of the library will implement smarter wrapping. This means that +// pathological cases can dramatically reach past the limit, such as a very +// long word. +func WrapString(s string, lim uint) string { + // Initialize a buffer with a slightly larger size to account for breaks + init := make([]byte, 0, len(s)) + buf := bytes.NewBuffer(init) + + var current uint + var wordBuf, spaceBuf bytes.Buffer + + for _, char := range s { + if char == '\n' { + if wordBuf.Len() == 0 { + if current+uint(spaceBuf.Len()) > lim { + current = 0 + } else { + current += uint(spaceBuf.Len()) + spaceBuf.WriteTo(buf) + } + spaceBuf.Reset() + } else { + current += uint(spaceBuf.Len() + wordBuf.Len()) + spaceBuf.WriteTo(buf) + spaceBuf.Reset() + wordBuf.WriteTo(buf) + wordBuf.Reset() + } + buf.WriteRune(char) + current = 0 + } else if unicode.IsSpace(char) { + if spaceBuf.Len() == 0 || wordBuf.Len() > 0 { + current += uint(spaceBuf.Len() + wordBuf.Len()) + spaceBuf.WriteTo(buf) + spaceBuf.Reset() + wordBuf.WriteTo(buf) + wordBuf.Reset() + } + + spaceBuf.WriteRune(char) + } else { + + wordBuf.WriteRune(char) + + if current+uint(spaceBuf.Len()+wordBuf.Len()) > lim && uint(wordBuf.Len()) < lim { + buf.WriteRune('\n') + current = 0 + spaceBuf.Reset() + } + } + } + + if wordBuf.Len() == 0 { + if current+uint(spaceBuf.Len()) <= lim { + spaceBuf.WriteTo(buf) + } + } else { + spaceBuf.WriteTo(buf) + wordBuf.WriteTo(buf) + } + + return buf.String() +} diff --git a/vendor/github.com/nsf/termbox-go/AUTHORS b/vendor/github.com/nsf/termbox-go/AUTHORS new file mode 100644 index 0000000..fe26fb0 --- /dev/null +++ b/vendor/github.com/nsf/termbox-go/AUTHORS @@ -0,0 +1,4 @@ +# Please keep this file sorted. + +Georg Reinke +nsf diff --git a/vendor/github.com/nsf/termbox-go/LICENSE b/vendor/github.com/nsf/termbox-go/LICENSE new file mode 100644 index 0000000..d9bc068 --- /dev/null +++ b/vendor/github.com/nsf/termbox-go/LICENSE @@ -0,0 +1,19 @@ +Copyright (C) 2012 termbox-go authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/nsf/termbox-go/README.md b/vendor/github.com/nsf/termbox-go/README.md new file mode 100644 index 0000000..f4b805f --- /dev/null +++ b/vendor/github.com/nsf/termbox-go/README.md @@ -0,0 +1,44 @@ +[![GoDoc](https://godoc.org/github.com/nsf/termbox-go?status.svg)](http://godoc.org/github.com/nsf/termbox-go) + +## Termbox +Termbox is a library that provides a minimalistic API which allows the programmer to write text-based user interfaces. The library is crossplatform and has both terminal-based implementations on *nix operating systems and a winapi console based implementation for windows operating systems. The basic idea is an abstraction of the greatest common subset of features available on all major terminals and other terminal-like APIs in a minimalistic fashion. Small API means it is easy to implement, test, maintain and learn it, that's what makes the termbox a distinct library in its area. + +### Installation +Install and update this go package with `go get -u github.com/nsf/termbox-go` + +### Examples +For examples of what can be done take a look at demos in the _demos directory. You can try them with go run: `go run _demos/keyboard.go` + +There are also some interesting projects using termbox-go: + - [godit](https://github.com/nsf/godit) is an emacsish lightweight text editor written using termbox. + - [gotetris](https://github.com/jjinux/gotetris) is an implementation of Tetris. + - [sokoban-go](https://github.com/rn2dy/sokoban-go) is an implementation of sokoban game. + - [hecate](https://github.com/evanmiller/hecate) is a hex editor designed by Satan. + - [httopd](https://github.com/verdverm/httopd) is top for httpd logs. + - [mop](https://github.com/mop-tracker/mop) is stock market tracker for hackers. + - [termui](https://github.com/gizak/termui) is a terminal dashboard. + - [termloop](https://github.com/JoelOtter/termloop) is a terminal game engine. + - [xterm-color-chart](https://github.com/kutuluk/xterm-color-chart) is a XTerm 256 color chart. + - [gocui](https://github.com/jroimartin/gocui) is a minimalist Go library aimed at creating console user interfaces. + - [dry](https://github.com/moncho/dry) is an interactive cli to manage Docker containers. + - [pxl](https://github.com/ichinaski/pxl) displays images in the terminal. + - [snake-game](https://github.com/DyegoCosta/snake-game) is an implementation of the Snake game. + - [gone](https://github.com/guillaumebreton/gone) is a CLI pomodoro® timer. + - [Spoof.go](https://github.com/sabey/spoofgo) controllable movement spoofing from the cli + - [lf](https://github.com/gokcehan/lf) is a terminal file manager + - [rat](https://github.com/ericfreese/rat) lets you compose shell commands to build terminal applications. + - [httplab](https://github.com/gchaincl/httplab) An interactive web server. + - [tetris](https://github.com/MichaelS11/tetris) Go Tetris with AI option + - [wot](https://github.com/kyu-suke/wot) Wait time during command is completed. + - [2048-go](https://github.com/1984weed/2048-go) is 2048 in Go + - [jv](https://github.com/maxzender/jv) helps you view JSON on the command-line. + - [pinger](https://github.com/hirose31/pinger) helps you to monitor numerous hosts using ICMP ECHO_REQUEST. 
+ - [vixl44](https://github.com/sebashwa/vixl44) lets you create pixel art inside your terminal using vim movements + - [zterm](https://github.com/varunrau/zterm) is a typing game inspired by http://zty.pe/ + - [gotypist](https://github.com/pb-/gotypist) is a fun touch-typing tutor following Steve Yegge's method. + - [cointop](https://github.com/miguelmota/cointop) is an interactive terminal based UI application for tracking cryptocurrencies. + - [pexpo](https://github.com/nnao45/pexpo) is a terminal sending ping tool written in Go. + - [jid](https://github.com/simeji/jid) is an interactive JSON drill down tool using filtering queries like jq. + +### API reference +[godoc.org/github.com/nsf/termbox-go](http://godoc.org/github.com/nsf/termbox-go) diff --git a/vendor/github.com/nsf/termbox-go/api.go b/vendor/github.com/nsf/termbox-go/api.go new file mode 100644 index 0000000..d530ab5 --- /dev/null +++ b/vendor/github.com/nsf/termbox-go/api.go @@ -0,0 +1,489 @@ +// +build !windows + +package termbox + +import "github.com/mattn/go-runewidth" +import "fmt" +import "os" +import "os/signal" +import "syscall" +import "runtime" +import "time" + +// public API + +// Initializes termbox library. This function should be called before any other functions. +// After successful initialization, the library must be finalized using 'Close' function. +// +// Example usage: +// err := termbox.Init() +// if err != nil { +// panic(err) +// } +// defer termbox.Close() +func Init() error { + var err error + + out, err = os.OpenFile("/dev/tty", syscall.O_WRONLY, 0) + if err != nil { + return err + } + in, err = syscall.Open("/dev/tty", syscall.O_RDONLY, 0) + if err != nil { + return err + } + + err = setup_term() + if err != nil { + return fmt.Errorf("termbox: error while reading terminfo data: %v", err) + } + + signal.Notify(sigwinch, syscall.SIGWINCH) + signal.Notify(sigio, syscall.SIGIO) + + _, err = fcntl(in, syscall.F_SETFL, syscall.O_ASYNC|syscall.O_NONBLOCK) + if err != nil { + return err + } + _, err = fcntl(in, syscall.F_SETOWN, syscall.Getpid()) + if runtime.GOOS != "darwin" && err != nil { + return err + } + err = tcgetattr(out.Fd(), &orig_tios) + if err != nil { + return err + } + + tios := orig_tios + tios.Iflag &^= syscall_IGNBRK | syscall_BRKINT | syscall_PARMRK | + syscall_ISTRIP | syscall_INLCR | syscall_IGNCR | + syscall_ICRNL | syscall_IXON + tios.Lflag &^= syscall_ECHO | syscall_ECHONL | syscall_ICANON | + syscall_ISIG | syscall_IEXTEN + tios.Cflag &^= syscall_CSIZE | syscall_PARENB + tios.Cflag |= syscall_CS8 + tios.Cc[syscall_VMIN] = 1 + tios.Cc[syscall_VTIME] = 0 + + err = tcsetattr(out.Fd(), &tios) + if err != nil { + return err + } + + out.WriteString(funcs[t_enter_ca]) + out.WriteString(funcs[t_enter_keypad]) + out.WriteString(funcs[t_hide_cursor]) + out.WriteString(funcs[t_clear_screen]) + + termw, termh = get_term_size(out.Fd()) + back_buffer.init(termw, termh) + front_buffer.init(termw, termh) + back_buffer.clear() + front_buffer.clear() + + go func() { + buf := make([]byte, 128) + for { + select { + case <-sigio: + for { + n, err := syscall.Read(in, buf) + if err == syscall.EAGAIN || err == syscall.EWOULDBLOCK { + break + } + select { + case input_comm <- input_event{buf[:n], err}: + ie := <-input_comm + buf = ie.data[:128] + case <-quit: + return + } + } + case <-quit: + return + } + } + }() + + IsInit = true + return nil +} + +// Interrupt an in-progress call to PollEvent by causing it to return +// EventInterrupt. 
Note that this function will block until the PollEvent +// function has successfully been interrupted. +func Interrupt() { + interrupt_comm <- struct{}{} +} + +// Finalizes termbox library, should be called after successful initialization +// when termbox's functionality isn't required anymore. +func Close() { + quit <- 1 + out.WriteString(funcs[t_show_cursor]) + out.WriteString(funcs[t_sgr0]) + out.WriteString(funcs[t_clear_screen]) + out.WriteString(funcs[t_exit_ca]) + out.WriteString(funcs[t_exit_keypad]) + out.WriteString(funcs[t_exit_mouse]) + tcsetattr(out.Fd(), &orig_tios) + + out.Close() + syscall.Close(in) + + // reset the state, so that on next Init() it will work again + termw = 0 + termh = 0 + input_mode = InputEsc + out = nil + in = 0 + lastfg = attr_invalid + lastbg = attr_invalid + lastx = coord_invalid + lasty = coord_invalid + cursor_x = cursor_hidden + cursor_y = cursor_hidden + foreground = ColorDefault + background = ColorDefault + IsInit = false +} + +// Synchronizes the internal back buffer with the terminal. +func Flush() error { + // invalidate cursor position + lastx = coord_invalid + lasty = coord_invalid + + update_size_maybe() + + for y := 0; y < front_buffer.height; y++ { + line_offset := y * front_buffer.width + for x := 0; x < front_buffer.width; { + cell_offset := line_offset + x + back := &back_buffer.cells[cell_offset] + front := &front_buffer.cells[cell_offset] + if back.Ch < ' ' { + back.Ch = ' ' + } + w := runewidth.RuneWidth(back.Ch) + if w == 0 || w == 2 && runewidth.IsAmbiguousWidth(back.Ch) { + w = 1 + } + if *back == *front { + x += w + continue + } + *front = *back + send_attr(back.Fg, back.Bg) + + if w == 2 && x == front_buffer.width-1 { + // there's not enough space for 2-cells rune, + // let's just put a space in there + send_char(x, y, ' ') + } else { + send_char(x, y, back.Ch) + if w == 2 { + next := cell_offset + 1 + front_buffer.cells[next] = Cell{ + Ch: 0, + Fg: back.Fg, + Bg: back.Bg, + } + } + } + x += w + } + } + if !is_cursor_hidden(cursor_x, cursor_y) { + write_cursor(cursor_x, cursor_y) + } + return flush() +} + +// Sets the position of the cursor. See also HideCursor(). +func SetCursor(x, y int) { + if is_cursor_hidden(cursor_x, cursor_y) && !is_cursor_hidden(x, y) { + outbuf.WriteString(funcs[t_show_cursor]) + } + + if !is_cursor_hidden(cursor_x, cursor_y) && is_cursor_hidden(x, y) { + outbuf.WriteString(funcs[t_hide_cursor]) + } + + cursor_x, cursor_y = x, y + if !is_cursor_hidden(cursor_x, cursor_y) { + write_cursor(cursor_x, cursor_y) + } +} + +// The shortcut for SetCursor(-1, -1). +func HideCursor() { + SetCursor(cursor_hidden, cursor_hidden) +} + +// Changes cell's parameters in the internal back buffer at the specified +// position. +func SetCell(x, y int, ch rune, fg, bg Attribute) { + if x < 0 || x >= back_buffer.width { + return + } + if y < 0 || y >= back_buffer.height { + return + } + + back_buffer.cells[y*back_buffer.width+x] = Cell{ch, fg, bg} +} + +// Returns a slice into the termbox's back buffer. You can get its dimensions +// using 'Size' function. The slice remains valid as long as no 'Clear' or +// 'Flush' function calls were made after call to this function. +func CellBuffer() []Cell { + return back_buffer.cells +} + +// After getting a raw event from PollRawEvent function call, you can parse it +// again into an ordinary one using termbox logic. That is parse an event as +// termbox would do it. 
Returned event in addition to usual Event struct fields +// sets N field to the amount of bytes used within 'data' slice. If the length +// of 'data' slice is zero or event cannot be parsed for some other reason, the +// function will return a special event type: EventNone. +// +// IMPORTANT: EventNone may contain a non-zero N, which means you should skip +// these bytes, because termbox cannot recognize them. +// +// NOTE: This API is experimental and may change in future. +func ParseEvent(data []byte) Event { + event := Event{Type: EventKey} + status := extract_event(data, &event, false) + if status != event_extracted { + return Event{Type: EventNone, N: event.N} + } + return event +} + +// Wait for an event and return it. This is a blocking function call. Instead +// of EventKey and EventMouse it returns EventRaw events. Raw event is written +// into `data` slice and Event's N field is set to the amount of bytes written. +// The minimum required length of the 'data' slice is 1. This requirement may +// vary on different platforms. +// +// NOTE: This API is experimental and may change in future. +func PollRawEvent(data []byte) Event { + if len(data) == 0 { + panic("len(data) >= 1 is a requirement") + } + + var event Event + if extract_raw_event(data, &event) { + return event + } + + for { + select { + case ev := <-input_comm: + if ev.err != nil { + return Event{Type: EventError, Err: ev.err} + } + + inbuf = append(inbuf, ev.data...) + input_comm <- ev + if extract_raw_event(data, &event) { + return event + } + case <-interrupt_comm: + event.Type = EventInterrupt + return event + + case <-sigwinch: + event.Type = EventResize + event.Width, event.Height = get_term_size(out.Fd()) + return event + } + } +} + +// Wait for an event and return it. This is a blocking function call. +func PollEvent() Event { + // Constant governing macOS specific behavior. See https://github.com/nsf/termbox-go/issues/132 + // This is an arbitrary delay which hopefully will be enough time for any lagging + // partial escape sequences to come through. + const esc_wait_delay = 100 * time.Millisecond + + var event Event + var esc_wait_timer *time.Timer + var esc_timeout <-chan time.Time + + // try to extract event from input buffer, return on success + event.Type = EventKey + status := extract_event(inbuf, &event, true) + if event.N != 0 { + copy(inbuf, inbuf[event.N:]) + inbuf = inbuf[:len(inbuf)-event.N] + } + if status == event_extracted { + return event + } else if status == esc_wait { + esc_wait_timer = time.NewTimer(esc_wait_delay) + esc_timeout = esc_wait_timer.C + } + + for { + select { + case ev := <-input_comm: + if esc_wait_timer != nil { + if !esc_wait_timer.Stop() { + <-esc_wait_timer.C + } + esc_wait_timer = nil + } + + if ev.err != nil { + return Event{Type: EventError, Err: ev.err} + } + + inbuf = append(inbuf, ev.data...) 
+ input_comm <- ev + status := extract_event(inbuf, &event, true) + if event.N != 0 { + copy(inbuf, inbuf[event.N:]) + inbuf = inbuf[:len(inbuf)-event.N] + } + if status == event_extracted { + return event + } else if status == esc_wait { + esc_wait_timer = time.NewTimer(esc_wait_delay) + esc_timeout = esc_wait_timer.C + } + case <-esc_timeout: + esc_wait_timer = nil + + status := extract_event(inbuf, &event, false) + if event.N != 0 { + copy(inbuf, inbuf[event.N:]) + inbuf = inbuf[:len(inbuf)-event.N] + } + if status == event_extracted { + return event + } + case <-interrupt_comm: + event.Type = EventInterrupt + return event + + case <-sigwinch: + event.Type = EventResize + event.Width, event.Height = get_term_size(out.Fd()) + return event + } + } +} + +// Returns the size of the internal back buffer (which is mostly the same as +// terminal's window size in characters). But it doesn't always match the size +// of the terminal window, after the terminal size has changed, the internal +// back buffer will get in sync only after Clear or Flush function calls. +func Size() (width int, height int) { + return termw, termh +} + +// Clears the internal back buffer. +func Clear(fg, bg Attribute) error { + foreground, background = fg, bg + err := update_size_maybe() + back_buffer.clear() + return err +} + +// Sets termbox input mode. Termbox has two input modes: +// +// 1. Esc input mode. When ESC sequence is in the buffer and it doesn't match +// any known sequence. ESC means KeyEsc. This is the default input mode. +// +// 2. Alt input mode. When ESC sequence is in the buffer and it doesn't match +// any known sequence. ESC enables ModAlt modifier for the next keyboard event. +// +// Both input modes can be OR'ed with Mouse mode. Setting Mouse mode bit up will +// enable mouse button press/release and drag events. +// +// If 'mode' is InputCurrent, returns the current input mode. See also Input* +// constants. +func SetInputMode(mode InputMode) InputMode { + if mode == InputCurrent { + return input_mode + } + if mode&(InputEsc|InputAlt) == 0 { + mode |= InputEsc + } + if mode&(InputEsc|InputAlt) == InputEsc|InputAlt { + mode &^= InputAlt + } + if mode&InputMouse != 0 { + out.WriteString(funcs[t_enter_mouse]) + } else { + out.WriteString(funcs[t_exit_mouse]) + } + + input_mode = mode + return input_mode +} + +// Sets the termbox output mode. Termbox has four output options: +// +// 1. OutputNormal => [1..8] +// This mode provides 8 different colors: +// black, red, green, yellow, blue, magenta, cyan, white +// Shortcut: ColorBlack, ColorRed, ... +// Attributes: AttrBold, AttrUnderline, AttrReverse +// +// Example usage: +// SetCell(x, y, '@', ColorBlack | AttrBold, ColorRed); +// +// 2. Output256 => [1..256] +// In this mode you can leverage the 256 terminal mode: +// 0x01 - 0x08: the 8 colors as in OutputNormal +// 0x09 - 0x10: Color* | AttrBold +// 0x11 - 0xe8: 216 different colors +// 0xe9 - 0x1ff: 24 different shades of grey +// +// Example usage: +// SetCell(x, y, '@', 184, 240); +// SetCell(x, y, '@', 0xb8, 0xf0); +// +// 3. Output216 => [1..216] +// This mode supports the 3rd range of the 256 mode only. +// But you don't need to provide an offset. +// +// 4. OutputGrayscale => [1..26] +// This mode supports the 4th range of the 256 mode +// and black and white colors from 3th range of the 256 mode +// But you don't need to provide an offset. +// +// In all modes, 0x00 represents the default color. +// +// `go run _demos/output.go` to see its impact on your terminal. 
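//
// Since the requested mode may not be available everywhere (the Windows
// implementation in api_windows.go always sets and returns OutputNormal),
// callers that rely on a specific mode can compare the returned value with
// the one they asked for.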
+// +// If 'mode' is OutputCurrent, it returns the current output mode. +// +// Note that this may return a different OutputMode than the one requested, +// as the requested mode may not be available on the target platform. +func SetOutputMode(mode OutputMode) OutputMode { + if mode == OutputCurrent { + return output_mode + } + + output_mode = mode + return output_mode +} + +// Sync comes handy when something causes desync between termbox's understanding +// of a terminal buffer and the reality. Such as a third party process. Sync +// forces a complete resync between the termbox and a terminal, it may not be +// visually pretty though. +func Sync() error { + front_buffer.clear() + err := send_clear() + if err != nil { + return err + } + + return Flush() +} diff --git a/vendor/github.com/nsf/termbox-go/api_common.go b/vendor/github.com/nsf/termbox-go/api_common.go new file mode 100644 index 0000000..5ca1371 --- /dev/null +++ b/vendor/github.com/nsf/termbox-go/api_common.go @@ -0,0 +1,187 @@ +// termbox is a library for creating cross-platform text-based interfaces +package termbox + +// public API, common OS agnostic part + +type ( + InputMode int + OutputMode int + EventType uint8 + Modifier uint8 + Key uint16 + Attribute uint16 +) + +// This type represents a termbox event. The 'Mod', 'Key' and 'Ch' fields are +// valid if 'Type' is EventKey. The 'Width' and 'Height' fields are valid if +// 'Type' is EventResize. The 'Err' field is valid if 'Type' is EventError. +type Event struct { + Type EventType // one of Event* constants + Mod Modifier // one of Mod* constants or 0 + Key Key // one of Key* constants, invalid if 'Ch' is not 0 + Ch rune // a unicode character + Width int // width of the screen + Height int // height of the screen + Err error // error in case if input failed + MouseX int // x coord of mouse + MouseY int // y coord of mouse + N int // number of bytes written when getting a raw event +} + +// A cell, single conceptual entity on the screen. The screen is basically a 2d +// array of cells. 'Ch' is a unicode character, 'Fg' and 'Bg' are foreground +// and background attributes respectively. +type Cell struct { + Ch rune + Fg Attribute + Bg Attribute +} + +// To know if termbox has been initialized or not +var ( + IsInit bool = false +) + +// Key constants, see Event.Key field. 
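// Special keys count down from 0xFFFF ("KeyF1 Key = 0xFFFF - iota"), while the
// control-key constants in the next block reuse the raw ASCII control byte
// values (e.g. KeyCtrlA = 0x01, KeyEsc = 0x1B), so the two ranges never collide.
// Printable characters are reported in Event.Ch rather than Event.Key.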
+const ( + KeyF1 Key = 0xFFFF - iota + KeyF2 + KeyF3 + KeyF4 + KeyF5 + KeyF6 + KeyF7 + KeyF8 + KeyF9 + KeyF10 + KeyF11 + KeyF12 + KeyInsert + KeyDelete + KeyHome + KeyEnd + KeyPgup + KeyPgdn + KeyArrowUp + KeyArrowDown + KeyArrowLeft + KeyArrowRight + key_min // see terminfo + MouseLeft + MouseMiddle + MouseRight + MouseRelease + MouseWheelUp + MouseWheelDown +) + +const ( + KeyCtrlTilde Key = 0x00 + KeyCtrl2 Key = 0x00 + KeyCtrlSpace Key = 0x00 + KeyCtrlA Key = 0x01 + KeyCtrlB Key = 0x02 + KeyCtrlC Key = 0x03 + KeyCtrlD Key = 0x04 + KeyCtrlE Key = 0x05 + KeyCtrlF Key = 0x06 + KeyCtrlG Key = 0x07 + KeyBackspace Key = 0x08 + KeyCtrlH Key = 0x08 + KeyTab Key = 0x09 + KeyCtrlI Key = 0x09 + KeyCtrlJ Key = 0x0A + KeyCtrlK Key = 0x0B + KeyCtrlL Key = 0x0C + KeyEnter Key = 0x0D + KeyCtrlM Key = 0x0D + KeyCtrlN Key = 0x0E + KeyCtrlO Key = 0x0F + KeyCtrlP Key = 0x10 + KeyCtrlQ Key = 0x11 + KeyCtrlR Key = 0x12 + KeyCtrlS Key = 0x13 + KeyCtrlT Key = 0x14 + KeyCtrlU Key = 0x15 + KeyCtrlV Key = 0x16 + KeyCtrlW Key = 0x17 + KeyCtrlX Key = 0x18 + KeyCtrlY Key = 0x19 + KeyCtrlZ Key = 0x1A + KeyEsc Key = 0x1B + KeyCtrlLsqBracket Key = 0x1B + KeyCtrl3 Key = 0x1B + KeyCtrl4 Key = 0x1C + KeyCtrlBackslash Key = 0x1C + KeyCtrl5 Key = 0x1D + KeyCtrlRsqBracket Key = 0x1D + KeyCtrl6 Key = 0x1E + KeyCtrl7 Key = 0x1F + KeyCtrlSlash Key = 0x1F + KeyCtrlUnderscore Key = 0x1F + KeySpace Key = 0x20 + KeyBackspace2 Key = 0x7F + KeyCtrl8 Key = 0x7F +) + +// Alt modifier constant, see Event.Mod field and SetInputMode function. +const ( + ModAlt Modifier = 1 << iota + ModMotion +) + +// Cell colors, you can combine a color with multiple attributes using bitwise +// OR ('|'). +const ( + ColorDefault Attribute = iota + ColorBlack + ColorRed + ColorGreen + ColorYellow + ColorBlue + ColorMagenta + ColorCyan + ColorWhite +) + +// Cell attributes, it is possible to use multiple attributes by combining them +// using bitwise OR ('|'). Although, colors cannot be combined. But you can +// combine attributes and a single color. +// +// It's worth mentioning that some platforms don't support certain attributes. +// For example windows console doesn't support AttrUnderline. And on some +// terminals applying AttrBold to background may result in blinking text. Use +// them with caution and test your code on various terminals. +const ( + AttrBold Attribute = 1 << (iota + 9) + AttrUnderline + AttrReverse +) + +// Input mode. See SetInputMode function. +const ( + InputEsc InputMode = 1 << iota + InputAlt + InputMouse + InputCurrent InputMode = 0 +) + +// Output mode. See SetOutputMode function. +const ( + OutputCurrent OutputMode = iota + OutputNormal + Output256 + Output216 + OutputGrayscale +) + +// Event type. See Event.Type field. +const ( + EventKey EventType = iota + EventResize + EventMouse + EventError + EventInterrupt + EventRaw + EventNone +) diff --git a/vendor/github.com/nsf/termbox-go/api_windows.go b/vendor/github.com/nsf/termbox-go/api_windows.go new file mode 100644 index 0000000..7def30a --- /dev/null +++ b/vendor/github.com/nsf/termbox-go/api_windows.go @@ -0,0 +1,239 @@ +package termbox + +import ( + "syscall" +) + +// public API + +// Initializes termbox library. This function should be called before any other functions. +// After successful initialization, the library must be finalized using 'Close' function. 
+// +// Example usage: +// err := termbox.Init() +// if err != nil { +// panic(err) +// } +// defer termbox.Close() +func Init() error { + var err error + + interrupt, err = create_event() + if err != nil { + return err + } + + in, err = syscall.Open("CONIN$", syscall.O_RDWR, 0) + if err != nil { + return err + } + out, err = syscall.Open("CONOUT$", syscall.O_RDWR, 0) + if err != nil { + return err + } + + err = get_console_mode(in, &orig_mode) + if err != nil { + return err + } + + err = set_console_mode(in, enable_window_input) + if err != nil { + return err + } + + orig_size = get_term_size(out) + win_size := get_win_size(out) + + err = set_console_screen_buffer_size(out, win_size) + if err != nil { + return err + } + + err = get_console_cursor_info(out, &orig_cursor_info) + if err != nil { + return err + } + + show_cursor(false) + term_size = get_term_size(out) + back_buffer.init(int(term_size.x), int(term_size.y)) + front_buffer.init(int(term_size.x), int(term_size.y)) + back_buffer.clear() + front_buffer.clear() + clear() + + diffbuf = make([]diff_msg, 0, 32) + + go input_event_producer() + IsInit = true + return nil +} + +// Finalizes termbox library, should be called after successful initialization +// when termbox's functionality isn't required anymore. +func Close() { + // we ignore errors here, because we can't really do anything about them + Clear(0, 0) + Flush() + + // stop event producer + cancel_comm <- true + set_event(interrupt) + select { + case <-input_comm: + default: + } + <-cancel_done_comm + + set_console_cursor_info(out, &orig_cursor_info) + set_console_cursor_position(out, coord{}) + set_console_screen_buffer_size(out, orig_size) + set_console_mode(in, orig_mode) + syscall.Close(in) + syscall.Close(out) + syscall.Close(interrupt) + IsInit = false +} + +// Interrupt an in-progress call to PollEvent by causing it to return +// EventInterrupt. Note that this function will block until the PollEvent +// function has successfully been interrupted. +func Interrupt() { + interrupt_comm <- struct{}{} +} + +// Synchronizes the internal back buffer with the terminal. +func Flush() error { + update_size_maybe() + prepare_diff_messages() + for _, diff := range diffbuf { + r := small_rect{ + left: 0, + top: diff.pos, + right: term_size.x - 1, + bottom: diff.pos + diff.lines - 1, + } + write_console_output(out, diff.chars, r) + } + if !is_cursor_hidden(cursor_x, cursor_y) { + move_cursor(cursor_x, cursor_y) + } + return nil +} + +// Sets the position of the cursor. See also HideCursor(). +func SetCursor(x, y int) { + if is_cursor_hidden(cursor_x, cursor_y) && !is_cursor_hidden(x, y) { + show_cursor(true) + } + + if !is_cursor_hidden(cursor_x, cursor_y) && is_cursor_hidden(x, y) { + show_cursor(false) + } + + cursor_x, cursor_y = x, y + if !is_cursor_hidden(cursor_x, cursor_y) { + move_cursor(cursor_x, cursor_y) + } +} + +// The shortcut for SetCursor(-1, -1). +func HideCursor() { + SetCursor(cursor_hidden, cursor_hidden) +} + +// Changes cell's parameters in the internal back buffer at the specified +// position. +func SetCell(x, y int, ch rune, fg, bg Attribute) { + if x < 0 || x >= back_buffer.width { + return + } + if y < 0 || y >= back_buffer.height { + return + } + + back_buffer.cells[y*back_buffer.width+x] = Cell{ch, fg, bg} +} + +// Returns a slice into the termbox's back buffer. You can get its dimensions +// using 'Size' function. The slice remains valid as long as no 'Clear' or +// 'Flush' function calls were made after call to this function. 
+func CellBuffer() []Cell { + return back_buffer.cells +} + +// Wait for an event and return it. This is a blocking function call. +func PollEvent() Event { + select { + case ev := <-input_comm: + return ev + case <-interrupt_comm: + return Event{Type: EventInterrupt} + } +} + +// Returns the size of the internal back buffer (which is mostly the same as +// console's window size in characters). But it doesn't always match the size +// of the console window, after the console size has changed, the internal back +// buffer will get in sync only after Clear or Flush function calls. +func Size() (int, int) { + return int(term_size.x), int(term_size.y) +} + +// Clears the internal back buffer. +func Clear(fg, bg Attribute) error { + foreground, background = fg, bg + update_size_maybe() + back_buffer.clear() + return nil +} + +// Sets termbox input mode. Termbox has two input modes: +// +// 1. Esc input mode. When ESC sequence is in the buffer and it doesn't match +// any known sequence. ESC means KeyEsc. This is the default input mode. +// +// 2. Alt input mode. When ESC sequence is in the buffer and it doesn't match +// any known sequence. ESC enables ModAlt modifier for the next keyboard event. +// +// Both input modes can be OR'ed with Mouse mode. Setting Mouse mode bit up will +// enable mouse button press/release and drag events. +// +// If 'mode' is InputCurrent, returns the current input mode. See also Input* +// constants. +func SetInputMode(mode InputMode) InputMode { + if mode == InputCurrent { + return input_mode + } + if mode&InputMouse != 0 { + err := set_console_mode(in, enable_window_input|enable_mouse_input|enable_extended_flags) + if err != nil { + panic(err) + } + } else { + err := set_console_mode(in, enable_window_input) + if err != nil { + panic(err) + } + } + + input_mode = mode + return input_mode +} + +// Sets the termbox output mode. +// +// Windows console does not support extra colour modes, +// so this will always set and return OutputNormal. +func SetOutputMode(mode OutputMode) OutputMode { + return OutputNormal +} + +// Sync comes handy when something causes desync between termbox's understanding +// of a terminal buffer and the reality. Such as a third party process. Sync +// forces a complete resync between the termbox and a terminal, it may not be +// visually pretty though. At the moment on Windows it does nothing. 
+func Sync() error { + return nil +} diff --git a/vendor/github.com/nsf/termbox-go/collect_terminfo.py b/vendor/github.com/nsf/termbox-go/collect_terminfo.py new file mode 100644 index 0000000..5e50975 --- /dev/null +++ b/vendor/github.com/nsf/termbox-go/collect_terminfo.py @@ -0,0 +1,110 @@ +#!/usr/bin/env python + +import sys, os, subprocess + +def escaped(s): + return repr(s)[1:-1] + +def tput(term, name): + try: + return subprocess.check_output(['tput', '-T%s' % term, name]).decode() + except subprocess.CalledProcessError as e: + return e.output.decode() + + +def w(s): + if s == None: + return + sys.stdout.write(s) + +terminals = { + 'xterm' : 'xterm', + 'rxvt-256color' : 'rxvt_256color', + 'rxvt-unicode' : 'rxvt_unicode', + 'linux' : 'linux', + 'Eterm' : 'eterm', + 'screen' : 'screen' +} + +keys = [ + "F1", "kf1", + "F2", "kf2", + "F3", "kf3", + "F4", "kf4", + "F5", "kf5", + "F6", "kf6", + "F7", "kf7", + "F8", "kf8", + "F9", "kf9", + "F10", "kf10", + "F11", "kf11", + "F12", "kf12", + "INSERT", "kich1", + "DELETE", "kdch1", + "HOME", "khome", + "END", "kend", + "PGUP", "kpp", + "PGDN", "knp", + "KEY_UP", "kcuu1", + "KEY_DOWN", "kcud1", + "KEY_LEFT", "kcub1", + "KEY_RIGHT", "kcuf1" +] + +funcs = [ + "T_ENTER_CA", "smcup", + "T_EXIT_CA", "rmcup", + "T_SHOW_CURSOR", "cnorm", + "T_HIDE_CURSOR", "civis", + "T_CLEAR_SCREEN", "clear", + "T_SGR0", "sgr0", + "T_UNDERLINE", "smul", + "T_BOLD", "bold", + "T_BLINK", "blink", + "T_REVERSE", "rev", + "T_ENTER_KEYPAD", "smkx", + "T_EXIT_KEYPAD", "rmkx" +] + +def iter_pairs(iterable): + iterable = iter(iterable) + while True: + yield (next(iterable), next(iterable)) + +def do_term(term, nick): + w("// %s\n" % term) + w("var %s_keys = []string{\n\t" % nick) + for k, v in iter_pairs(keys): + w('"') + w(escaped(tput(term, v))) + w('",') + w("\n}\n") + w("var %s_funcs = []string{\n\t" % nick) + for k,v in iter_pairs(funcs): + w('"') + if v == "sgr": + w("\\033[3%d;4%dm") + elif v == "cup": + w("\\033[%d;%dH") + else: + w(escaped(tput(term, v))) + w('", ') + w("\n}\n\n") + +def do_terms(d): + w("var terms = []struct {\n") + w("\tname string\n") + w("\tkeys []string\n") + w("\tfuncs []string\n") + w("}{\n") + for k, v in d.items(): + w('\t{"%s", %s_keys, %s_funcs},\n' % (k, v, v)) + w("}\n\n") + +w("// +build !windows\n\npackage termbox\n\n") + +for k,v in terminals.items(): + do_term(k, v) + +do_terms(terminals) + diff --git a/vendor/github.com/nsf/termbox-go/escwait.go b/vendor/github.com/nsf/termbox-go/escwait.go new file mode 100644 index 0000000..b7bbb89 --- /dev/null +++ b/vendor/github.com/nsf/termbox-go/escwait.go @@ -0,0 +1,11 @@ +// +build !darwin + +package termbox + +// On all systems other than macOS, disable behavior which will wait before +// deciding that the escape key was pressed, to account for partially send +// escape sequences, especially with regard to lengthy mouse sequences. +// See https://github.com/nsf/termbox-go/issues/132 +func enable_wait_for_escape_sequence() bool { + return false +} diff --git a/vendor/github.com/nsf/termbox-go/escwait_darwin.go b/vendor/github.com/nsf/termbox-go/escwait_darwin.go new file mode 100644 index 0000000..dde69b6 --- /dev/null +++ b/vendor/github.com/nsf/termbox-go/escwait_darwin.go @@ -0,0 +1,9 @@ +package termbox + +// On macOS, enable behavior which will wait before deciding that the escape +// key was pressed, to account for partially send escape sequences, especially +// with regard to lengthy mouse sequences. 
+// See https://github.com/nsf/termbox-go/issues/132 +func enable_wait_for_escape_sequence() bool { + return true +} diff --git a/vendor/github.com/nsf/termbox-go/syscalls.go b/vendor/github.com/nsf/termbox-go/syscalls.go new file mode 100644 index 0000000..4f52bb9 --- /dev/null +++ b/vendor/github.com/nsf/termbox-go/syscalls.go @@ -0,0 +1,39 @@ +// +build ignore + +package termbox + +/* +#include +#include +*/ +import "C" + +type syscall_Termios C.struct_termios + +const ( + syscall_IGNBRK = C.IGNBRK + syscall_BRKINT = C.BRKINT + syscall_PARMRK = C.PARMRK + syscall_ISTRIP = C.ISTRIP + syscall_INLCR = C.INLCR + syscall_IGNCR = C.IGNCR + syscall_ICRNL = C.ICRNL + syscall_IXON = C.IXON + syscall_OPOST = C.OPOST + syscall_ECHO = C.ECHO + syscall_ECHONL = C.ECHONL + syscall_ICANON = C.ICANON + syscall_ISIG = C.ISIG + syscall_IEXTEN = C.IEXTEN + syscall_CSIZE = C.CSIZE + syscall_PARENB = C.PARENB + syscall_CS8 = C.CS8 + syscall_VMIN = C.VMIN + syscall_VTIME = C.VTIME + + // on darwin change these to (on *bsd too?): + // C.TIOCGETA + // C.TIOCSETA + syscall_TCGETS = C.TCGETS + syscall_TCSETS = C.TCSETS +) diff --git a/vendor/github.com/nsf/termbox-go/syscalls_darwin.go b/vendor/github.com/nsf/termbox-go/syscalls_darwin.go new file mode 100644 index 0000000..25b78f7 --- /dev/null +++ b/vendor/github.com/nsf/termbox-go/syscalls_darwin.go @@ -0,0 +1,41 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs syscalls.go + +// +build !amd64 + +package termbox + +type syscall_Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed uint32 + Ospeed uint32 +} + +const ( + syscall_IGNBRK = 0x1 + syscall_BRKINT = 0x2 + syscall_PARMRK = 0x8 + syscall_ISTRIP = 0x20 + syscall_INLCR = 0x40 + syscall_IGNCR = 0x80 + syscall_ICRNL = 0x100 + syscall_IXON = 0x200 + syscall_OPOST = 0x1 + syscall_ECHO = 0x8 + syscall_ECHONL = 0x10 + syscall_ICANON = 0x100 + syscall_ISIG = 0x80 + syscall_IEXTEN = 0x400 + syscall_CSIZE = 0x300 + syscall_PARENB = 0x1000 + syscall_CS8 = 0x300 + syscall_VMIN = 0x10 + syscall_VTIME = 0x11 + + syscall_TCGETS = 0x402c7413 + syscall_TCSETS = 0x802c7414 +) diff --git a/vendor/github.com/nsf/termbox-go/syscalls_darwin_amd64.go b/vendor/github.com/nsf/termbox-go/syscalls_darwin_amd64.go new file mode 100644 index 0000000..11f25be --- /dev/null +++ b/vendor/github.com/nsf/termbox-go/syscalls_darwin_amd64.go @@ -0,0 +1,40 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs syscalls.go + +package termbox + +type syscall_Termios struct { + Iflag uint64 + Oflag uint64 + Cflag uint64 + Lflag uint64 + Cc [20]uint8 + Pad_cgo_0 [4]byte + Ispeed uint64 + Ospeed uint64 +} + +const ( + syscall_IGNBRK = 0x1 + syscall_BRKINT = 0x2 + syscall_PARMRK = 0x8 + syscall_ISTRIP = 0x20 + syscall_INLCR = 0x40 + syscall_IGNCR = 0x80 + syscall_ICRNL = 0x100 + syscall_IXON = 0x200 + syscall_OPOST = 0x1 + syscall_ECHO = 0x8 + syscall_ECHONL = 0x10 + syscall_ICANON = 0x100 + syscall_ISIG = 0x80 + syscall_IEXTEN = 0x400 + syscall_CSIZE = 0x300 + syscall_PARENB = 0x1000 + syscall_CS8 = 0x300 + syscall_VMIN = 0x10 + syscall_VTIME = 0x11 + + syscall_TCGETS = 0x40487413 + syscall_TCSETS = 0x80487414 +) diff --git a/vendor/github.com/nsf/termbox-go/syscalls_dragonfly.go b/vendor/github.com/nsf/termbox-go/syscalls_dragonfly.go new file mode 100644 index 0000000..e03624e --- /dev/null +++ b/vendor/github.com/nsf/termbox-go/syscalls_dragonfly.go @@ -0,0 +1,39 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs syscalls.go + +package termbox + +type syscall_Termios 
struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed uint32 + Ospeed uint32 +} + +const ( + syscall_IGNBRK = 0x1 + syscall_BRKINT = 0x2 + syscall_PARMRK = 0x8 + syscall_ISTRIP = 0x20 + syscall_INLCR = 0x40 + syscall_IGNCR = 0x80 + syscall_ICRNL = 0x100 + syscall_IXON = 0x200 + syscall_OPOST = 0x1 + syscall_ECHO = 0x8 + syscall_ECHONL = 0x10 + syscall_ICANON = 0x100 + syscall_ISIG = 0x80 + syscall_IEXTEN = 0x400 + syscall_CSIZE = 0x300 + syscall_PARENB = 0x1000 + syscall_CS8 = 0x300 + syscall_VMIN = 0x10 + syscall_VTIME = 0x11 + + syscall_TCGETS = 0x402c7413 + syscall_TCSETS = 0x802c7414 +) diff --git a/vendor/github.com/nsf/termbox-go/syscalls_freebsd.go b/vendor/github.com/nsf/termbox-go/syscalls_freebsd.go new file mode 100644 index 0000000..e03624e --- /dev/null +++ b/vendor/github.com/nsf/termbox-go/syscalls_freebsd.go @@ -0,0 +1,39 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs syscalls.go + +package termbox + +type syscall_Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed uint32 + Ospeed uint32 +} + +const ( + syscall_IGNBRK = 0x1 + syscall_BRKINT = 0x2 + syscall_PARMRK = 0x8 + syscall_ISTRIP = 0x20 + syscall_INLCR = 0x40 + syscall_IGNCR = 0x80 + syscall_ICRNL = 0x100 + syscall_IXON = 0x200 + syscall_OPOST = 0x1 + syscall_ECHO = 0x8 + syscall_ECHONL = 0x10 + syscall_ICANON = 0x100 + syscall_ISIG = 0x80 + syscall_IEXTEN = 0x400 + syscall_CSIZE = 0x300 + syscall_PARENB = 0x1000 + syscall_CS8 = 0x300 + syscall_VMIN = 0x10 + syscall_VTIME = 0x11 + + syscall_TCGETS = 0x402c7413 + syscall_TCSETS = 0x802c7414 +) diff --git a/vendor/github.com/nsf/termbox-go/syscalls_linux.go b/vendor/github.com/nsf/termbox-go/syscalls_linux.go new file mode 100644 index 0000000..b88960d --- /dev/null +++ b/vendor/github.com/nsf/termbox-go/syscalls_linux.go @@ -0,0 +1,33 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs syscalls.go + +package termbox + +import "syscall" + +type syscall_Termios syscall.Termios + +const ( + syscall_IGNBRK = syscall.IGNBRK + syscall_BRKINT = syscall.BRKINT + syscall_PARMRK = syscall.PARMRK + syscall_ISTRIP = syscall.ISTRIP + syscall_INLCR = syscall.INLCR + syscall_IGNCR = syscall.IGNCR + syscall_ICRNL = syscall.ICRNL + syscall_IXON = syscall.IXON + syscall_OPOST = syscall.OPOST + syscall_ECHO = syscall.ECHO + syscall_ECHONL = syscall.ECHONL + syscall_ICANON = syscall.ICANON + syscall_ISIG = syscall.ISIG + syscall_IEXTEN = syscall.IEXTEN + syscall_CSIZE = syscall.CSIZE + syscall_PARENB = syscall.PARENB + syscall_CS8 = syscall.CS8 + syscall_VMIN = syscall.VMIN + syscall_VTIME = syscall.VTIME + + syscall_TCGETS = syscall.TCGETS + syscall_TCSETS = syscall.TCSETS +) diff --git a/vendor/github.com/nsf/termbox-go/syscalls_netbsd.go b/vendor/github.com/nsf/termbox-go/syscalls_netbsd.go new file mode 100644 index 0000000..49a3355 --- /dev/null +++ b/vendor/github.com/nsf/termbox-go/syscalls_netbsd.go @@ -0,0 +1,39 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs syscalls.go + +package termbox + +type syscall_Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed int32 + Ospeed int32 +} + +const ( + syscall_IGNBRK = 0x1 + syscall_BRKINT = 0x2 + syscall_PARMRK = 0x8 + syscall_ISTRIP = 0x20 + syscall_INLCR = 0x40 + syscall_IGNCR = 0x80 + syscall_ICRNL = 0x100 + syscall_IXON = 0x200 + syscall_OPOST = 0x1 + syscall_ECHO = 0x8 + syscall_ECHONL = 0x10 + syscall_ICANON = 0x100 + syscall_ISIG = 0x80 + syscall_IEXTEN = 0x400 + 
syscall_CSIZE = 0x300 + syscall_PARENB = 0x1000 + syscall_CS8 = 0x300 + syscall_VMIN = 0x10 + syscall_VTIME = 0x11 + + syscall_TCGETS = 0x402c7413 + syscall_TCSETS = 0x802c7414 +) diff --git a/vendor/github.com/nsf/termbox-go/syscalls_openbsd.go b/vendor/github.com/nsf/termbox-go/syscalls_openbsd.go new file mode 100644 index 0000000..49a3355 --- /dev/null +++ b/vendor/github.com/nsf/termbox-go/syscalls_openbsd.go @@ -0,0 +1,39 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs syscalls.go + +package termbox + +type syscall_Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed int32 + Ospeed int32 +} + +const ( + syscall_IGNBRK = 0x1 + syscall_BRKINT = 0x2 + syscall_PARMRK = 0x8 + syscall_ISTRIP = 0x20 + syscall_INLCR = 0x40 + syscall_IGNCR = 0x80 + syscall_ICRNL = 0x100 + syscall_IXON = 0x200 + syscall_OPOST = 0x1 + syscall_ECHO = 0x8 + syscall_ECHONL = 0x10 + syscall_ICANON = 0x100 + syscall_ISIG = 0x80 + syscall_IEXTEN = 0x400 + syscall_CSIZE = 0x300 + syscall_PARENB = 0x1000 + syscall_CS8 = 0x300 + syscall_VMIN = 0x10 + syscall_VTIME = 0x11 + + syscall_TCGETS = 0x402c7413 + syscall_TCSETS = 0x802c7414 +) diff --git a/vendor/github.com/nsf/termbox-go/syscalls_windows.go b/vendor/github.com/nsf/termbox-go/syscalls_windows.go new file mode 100644 index 0000000..472d002 --- /dev/null +++ b/vendor/github.com/nsf/termbox-go/syscalls_windows.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs -- -DUNICODE syscalls.go + +package termbox + +const ( + foreground_blue = 0x1 + foreground_green = 0x2 + foreground_red = 0x4 + foreground_intensity = 0x8 + background_blue = 0x10 + background_green = 0x20 + background_red = 0x40 + background_intensity = 0x80 + std_input_handle = -0xa + std_output_handle = -0xb + key_event = 0x1 + mouse_event = 0x2 + window_buffer_size_event = 0x4 + enable_window_input = 0x8 + enable_mouse_input = 0x10 + enable_extended_flags = 0x80 + + vk_f1 = 0x70 + vk_f2 = 0x71 + vk_f3 = 0x72 + vk_f4 = 0x73 + vk_f5 = 0x74 + vk_f6 = 0x75 + vk_f7 = 0x76 + vk_f8 = 0x77 + vk_f9 = 0x78 + vk_f10 = 0x79 + vk_f11 = 0x7a + vk_f12 = 0x7b + vk_insert = 0x2d + vk_delete = 0x2e + vk_home = 0x24 + vk_end = 0x23 + vk_pgup = 0x21 + vk_pgdn = 0x22 + vk_arrow_up = 0x26 + vk_arrow_down = 0x28 + vk_arrow_left = 0x25 + vk_arrow_right = 0x27 + vk_backspace = 0x8 + vk_tab = 0x9 + vk_enter = 0xd + vk_esc = 0x1b + vk_space = 0x20 + + left_alt_pressed = 0x2 + left_ctrl_pressed = 0x8 + right_alt_pressed = 0x1 + right_ctrl_pressed = 0x4 + shift_pressed = 0x10 + + generic_read = 0x80000000 + generic_write = 0x40000000 + console_textmode_buffer = 0x1 +) diff --git a/vendor/github.com/nsf/termbox-go/termbox.go b/vendor/github.com/nsf/termbox-go/termbox.go new file mode 100644 index 0000000..fbe4c3d --- /dev/null +++ b/vendor/github.com/nsf/termbox-go/termbox.go @@ -0,0 +1,529 @@ +// +build !windows + +package termbox + +import "unicode/utf8" +import "bytes" +import "syscall" +import "unsafe" +import "strings" +import "strconv" +import "os" +import "io" + +// private API + +const ( + t_enter_ca = iota + t_exit_ca + t_show_cursor + t_hide_cursor + t_clear_screen + t_sgr0 + t_underline + t_bold + t_blink + t_reverse + t_enter_keypad + t_exit_keypad + t_enter_mouse + t_exit_mouse + t_max_funcs +) + +const ( + coord_invalid = -2 + attr_invalid = Attribute(0xFFFF) +) + +type input_event struct { + data []byte + err error +} + +type extract_event_res int + +const ( + event_not_extracted extract_event_res = iota + event_extracted + esc_wait 
+) + +var ( + // term specific sequences + keys []string + funcs []string + + // termbox inner state + orig_tios syscall_Termios + back_buffer cellbuf + front_buffer cellbuf + termw int + termh int + input_mode = InputEsc + output_mode = OutputNormal + out *os.File + in int + lastfg = attr_invalid + lastbg = attr_invalid + lastx = coord_invalid + lasty = coord_invalid + cursor_x = cursor_hidden + cursor_y = cursor_hidden + foreground = ColorDefault + background = ColorDefault + inbuf = make([]byte, 0, 64) + outbuf bytes.Buffer + sigwinch = make(chan os.Signal, 1) + sigio = make(chan os.Signal, 1) + quit = make(chan int) + input_comm = make(chan input_event) + interrupt_comm = make(chan struct{}) + intbuf = make([]byte, 0, 16) + + // grayscale indexes + grayscale = []Attribute{ + 0, 17, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, + 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 232, + } +) + +func write_cursor(x, y int) { + outbuf.WriteString("\033[") + outbuf.Write(strconv.AppendUint(intbuf, uint64(y+1), 10)) + outbuf.WriteString(";") + outbuf.Write(strconv.AppendUint(intbuf, uint64(x+1), 10)) + outbuf.WriteString("H") +} + +func write_sgr_fg(a Attribute) { + switch output_mode { + case Output256, Output216, OutputGrayscale: + outbuf.WriteString("\033[38;5;") + outbuf.Write(strconv.AppendUint(intbuf, uint64(a-1), 10)) + outbuf.WriteString("m") + default: + outbuf.WriteString("\033[3") + outbuf.Write(strconv.AppendUint(intbuf, uint64(a-1), 10)) + outbuf.WriteString("m") + } +} + +func write_sgr_bg(a Attribute) { + switch output_mode { + case Output256, Output216, OutputGrayscale: + outbuf.WriteString("\033[48;5;") + outbuf.Write(strconv.AppendUint(intbuf, uint64(a-1), 10)) + outbuf.WriteString("m") + default: + outbuf.WriteString("\033[4") + outbuf.Write(strconv.AppendUint(intbuf, uint64(a-1), 10)) + outbuf.WriteString("m") + } +} + +func write_sgr(fg, bg Attribute) { + switch output_mode { + case Output256, Output216, OutputGrayscale: + outbuf.WriteString("\033[38;5;") + outbuf.Write(strconv.AppendUint(intbuf, uint64(fg-1), 10)) + outbuf.WriteString("m") + outbuf.WriteString("\033[48;5;") + outbuf.Write(strconv.AppendUint(intbuf, uint64(bg-1), 10)) + outbuf.WriteString("m") + default: + outbuf.WriteString("\033[3") + outbuf.Write(strconv.AppendUint(intbuf, uint64(fg-1), 10)) + outbuf.WriteString(";4") + outbuf.Write(strconv.AppendUint(intbuf, uint64(bg-1), 10)) + outbuf.WriteString("m") + } +} + +type winsize struct { + rows uint16 + cols uint16 + xpixels uint16 + ypixels uint16 +} + +func get_term_size(fd uintptr) (int, int) { + var sz winsize + _, _, _ = syscall.Syscall(syscall.SYS_IOCTL, + fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&sz))) + return int(sz.cols), int(sz.rows) +} + +func send_attr(fg, bg Attribute) { + if fg == lastfg && bg == lastbg { + return + } + + outbuf.WriteString(funcs[t_sgr0]) + + var fgcol, bgcol Attribute + + switch output_mode { + case Output256: + fgcol = fg & 0x1FF + bgcol = bg & 0x1FF + case Output216: + fgcol = fg & 0xFF + bgcol = bg & 0xFF + if fgcol > 216 { + fgcol = ColorDefault + } + if bgcol > 216 { + bgcol = ColorDefault + } + if fgcol != ColorDefault { + fgcol += 0x10 + } + if bgcol != ColorDefault { + bgcol += 0x10 + } + case OutputGrayscale: + fgcol = fg & 0x1F + bgcol = bg & 0x1F + if fgcol > 26 { + fgcol = ColorDefault + } + if bgcol > 26 { + bgcol = ColorDefault + } + if fgcol != ColorDefault { + fgcol = grayscale[fgcol] + } + if bgcol != ColorDefault { + bgcol = grayscale[bgcol] + } + default: + fgcol = 
fg & 0x0F + bgcol = bg & 0x0F + } + + if fgcol != ColorDefault { + if bgcol != ColorDefault { + write_sgr(fgcol, bgcol) + } else { + write_sgr_fg(fgcol) + } + } else if bgcol != ColorDefault { + write_sgr_bg(bgcol) + } + + if fg&AttrBold != 0 { + outbuf.WriteString(funcs[t_bold]) + } + if bg&AttrBold != 0 { + outbuf.WriteString(funcs[t_blink]) + } + if fg&AttrUnderline != 0 { + outbuf.WriteString(funcs[t_underline]) + } + if fg&AttrReverse|bg&AttrReverse != 0 { + outbuf.WriteString(funcs[t_reverse]) + } + + lastfg, lastbg = fg, bg +} + +func send_char(x, y int, ch rune) { + var buf [8]byte + n := utf8.EncodeRune(buf[:], ch) + if x-1 != lastx || y != lasty { + write_cursor(x, y) + } + lastx, lasty = x, y + outbuf.Write(buf[:n]) +} + +func flush() error { + _, err := io.Copy(out, &outbuf) + outbuf.Reset() + return err +} + +func send_clear() error { + send_attr(foreground, background) + outbuf.WriteString(funcs[t_clear_screen]) + if !is_cursor_hidden(cursor_x, cursor_y) { + write_cursor(cursor_x, cursor_y) + } + + // we need to invalidate cursor position too and these two vars are + // used only for simple cursor positioning optimization, cursor + // actually may be in the correct place, but we simply discard + // optimization once and it gives us simple solution for the case when + // cursor moved + lastx = coord_invalid + lasty = coord_invalid + + return flush() +} + +func update_size_maybe() error { + w, h := get_term_size(out.Fd()) + if w != termw || h != termh { + termw, termh = w, h + back_buffer.resize(termw, termh) + front_buffer.resize(termw, termh) + front_buffer.clear() + return send_clear() + } + return nil +} + +func tcsetattr(fd uintptr, termios *syscall_Termios) error { + r, _, e := syscall.Syscall(syscall.SYS_IOCTL, + fd, uintptr(syscall_TCSETS), uintptr(unsafe.Pointer(termios))) + if r != 0 { + return os.NewSyscallError("SYS_IOCTL", e) + } + return nil +} + +func tcgetattr(fd uintptr, termios *syscall_Termios) error { + r, _, e := syscall.Syscall(syscall.SYS_IOCTL, + fd, uintptr(syscall_TCGETS), uintptr(unsafe.Pointer(termios))) + if r != 0 { + return os.NewSyscallError("SYS_IOCTL", e) + } + return nil +} + +func parse_mouse_event(event *Event, buf string) (int, bool) { + if strings.HasPrefix(buf, "\033[M") && len(buf) >= 6 { + // X10 mouse encoding, the simplest one + // \033 [ M Cb Cx Cy + b := buf[3] - 32 + switch b & 3 { + case 0: + if b&64 != 0 { + event.Key = MouseWheelUp + } else { + event.Key = MouseLeft + } + case 1: + if b&64 != 0 { + event.Key = MouseWheelDown + } else { + event.Key = MouseMiddle + } + case 2: + event.Key = MouseRight + case 3: + event.Key = MouseRelease + default: + return 6, false + } + event.Type = EventMouse // KeyEvent by default + if b&32 != 0 { + event.Mod |= ModMotion + } + + // the coord is 1,1 for upper left + event.MouseX = int(buf[4]) - 1 - 32 + event.MouseY = int(buf[5]) - 1 - 32 + return 6, true + } else if strings.HasPrefix(buf, "\033[<") || strings.HasPrefix(buf, "\033[") { + // xterm 1006 extended mode or urxvt 1015 extended mode + // xterm: \033 [ < Cb ; Cx ; Cy (M or m) + // urxvt: \033 [ Cb ; Cx ; Cy M + + // find the first M or m, that's where we stop + mi := strings.IndexAny(buf, "Mm") + if mi == -1 { + return 0, false + } + + // whether it's a capital M or not + isM := buf[mi] == 'M' + + // whether it's urxvt or not + isU := false + + // buf[2] is safe here, because having M or m found means we have at + // least 3 bytes in a string + if buf[2] == '<' { + buf = buf[3:mi] + } else { + isU = true + buf = buf[2:mi] + } + + s1 
:= strings.Index(buf, ";") + s2 := strings.LastIndex(buf, ";") + // not found or only one ';' + if s1 == -1 || s2 == -1 || s1 == s2 { + return 0, false + } + + n1, err := strconv.ParseInt(buf[0:s1], 10, 64) + if err != nil { + return 0, false + } + n2, err := strconv.ParseInt(buf[s1+1:s2], 10, 64) + if err != nil { + return 0, false + } + n3, err := strconv.ParseInt(buf[s2+1:], 10, 64) + if err != nil { + return 0, false + } + + // on urxvt, first number is encoded exactly as in X10, but we need to + // make it zero-based, on xterm it is zero-based already + if isU { + n1 -= 32 + } + switch n1 & 3 { + case 0: + if n1&64 != 0 { + event.Key = MouseWheelUp + } else { + event.Key = MouseLeft + } + case 1: + if n1&64 != 0 { + event.Key = MouseWheelDown + } else { + event.Key = MouseMiddle + } + case 2: + event.Key = MouseRight + case 3: + event.Key = MouseRelease + default: + return mi + 1, false + } + if !isM { + // on xterm mouse release is signaled by lowercase m + event.Key = MouseRelease + } + + event.Type = EventMouse // KeyEvent by default + if n1&32 != 0 { + event.Mod |= ModMotion + } + + event.MouseX = int(n2) - 1 + event.MouseY = int(n3) - 1 + return mi + 1, true + } + + return 0, false +} + +func parse_escape_sequence(event *Event, buf []byte) (int, bool) { + bufstr := string(buf) + for i, key := range keys { + if strings.HasPrefix(bufstr, key) { + event.Ch = 0 + event.Key = Key(0xFFFF - i) + return len(key), true + } + } + + // if none of the keys match, let's try mouse sequences + return parse_mouse_event(event, bufstr) +} + +func extract_raw_event(data []byte, event *Event) bool { + if len(inbuf) == 0 { + return false + } + + n := len(data) + if n == 0 { + return false + } + + n = copy(data, inbuf) + copy(inbuf, inbuf[n:]) + inbuf = inbuf[:len(inbuf)-n] + + event.N = n + event.Type = EventRaw + return true +} + +func extract_event(inbuf []byte, event *Event, allow_esc_wait bool) extract_event_res { + if len(inbuf) == 0 { + event.N = 0 + return event_not_extracted + } + + if inbuf[0] == '\033' { + // possible escape sequence + if n, ok := parse_escape_sequence(event, inbuf); n != 0 { + event.N = n + if ok { + return event_extracted + } else { + return event_not_extracted + } + } + + // possible partially read escape sequence; trigger a wait if appropriate + if enable_wait_for_escape_sequence() && allow_esc_wait { + event.N = 0 + return esc_wait + } + + // it's not escape sequence, then it's Alt or Esc, check input_mode + switch { + case input_mode&InputEsc != 0: + // if we're in escape mode, fill Esc event, pop buffer, return success + event.Ch = 0 + event.Key = KeyEsc + event.Mod = 0 + event.N = 1 + return event_extracted + case input_mode&InputAlt != 0: + // if we're in alt mode, set Alt modifier to event and redo parsing + event.Mod = ModAlt + status := extract_event(inbuf[1:], event, false) + if status == event_extracted { + event.N++ + } else { + event.N = 0 + } + return status + default: + panic("unreachable") + } + } + + // if we're here, this is not an escape sequence and not an alt sequence + // so, it's a FUNCTIONAL KEY or a UNICODE character + + // first of all check if it's a functional key + if Key(inbuf[0]) <= KeySpace || Key(inbuf[0]) == KeyBackspace2 { + // fill event, pop buffer, return success + event.Ch = 0 + event.Key = Key(inbuf[0]) + event.N = 1 + return event_extracted + } + + // the only possible option is utf8 rune + if r, n := utf8.DecodeRune(inbuf); r != utf8.RuneError { + event.Ch = r + event.Key = 0 + event.N = n + return event_extracted + } + + return 
event_not_extracted +} + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r, _, e := syscall.Syscall(syscall.SYS_FCNTL, uintptr(fd), uintptr(cmd), + uintptr(arg)) + val = int(r) + if e != 0 { + err = e + } + return +} diff --git a/vendor/github.com/nsf/termbox-go/termbox_common.go b/vendor/github.com/nsf/termbox-go/termbox_common.go new file mode 100644 index 0000000..c3355cc --- /dev/null +++ b/vendor/github.com/nsf/termbox-go/termbox_common.go @@ -0,0 +1,59 @@ +package termbox + +// private API, common OS agnostic part + +type cellbuf struct { + width int + height int + cells []Cell +} + +func (this *cellbuf) init(width, height int) { + this.width = width + this.height = height + this.cells = make([]Cell, width*height) +} + +func (this *cellbuf) resize(width, height int) { + if this.width == width && this.height == height { + return + } + + oldw := this.width + oldh := this.height + oldcells := this.cells + + this.init(width, height) + this.clear() + + minw, minh := oldw, oldh + + if width < minw { + minw = width + } + if height < minh { + minh = height + } + + for i := 0; i < minh; i++ { + srco, dsto := i*oldw, i*width + src := oldcells[srco : srco+minw] + dst := this.cells[dsto : dsto+minw] + copy(dst, src) + } +} + +func (this *cellbuf) clear() { + for i := range this.cells { + c := &this.cells[i] + c.Ch = ' ' + c.Fg = foreground + c.Bg = background + } +} + +const cursor_hidden = -1 + +func is_cursor_hidden(x, y int) bool { + return x == cursor_hidden || y == cursor_hidden +} diff --git a/vendor/github.com/nsf/termbox-go/termbox_windows.go b/vendor/github.com/nsf/termbox-go/termbox_windows.go new file mode 100644 index 0000000..7752a17 --- /dev/null +++ b/vendor/github.com/nsf/termbox-go/termbox_windows.go @@ -0,0 +1,915 @@ +package termbox + +import "math" +import "syscall" +import "unsafe" +import "unicode/utf16" +import "github.com/mattn/go-runewidth" + +type ( + wchar uint16 + short int16 + dword uint32 + word uint16 + char_info struct { + char wchar + attr word + } + coord struct { + x short + y short + } + small_rect struct { + left short + top short + right short + bottom short + } + console_screen_buffer_info struct { + size coord + cursor_position coord + attributes word + window small_rect + maximum_window_size coord + } + console_cursor_info struct { + size dword + visible int32 + } + input_record struct { + event_type word + _ [2]byte + event [16]byte + } + key_event_record struct { + key_down int32 + repeat_count word + virtual_key_code word + virtual_scan_code word + unicode_char wchar + control_key_state dword + } + window_buffer_size_record struct { + size coord + } + mouse_event_record struct { + mouse_pos coord + button_state dword + control_key_state dword + event_flags dword + } + console_font_info struct { + font uint32 + font_size coord + } +) + +const ( + mouse_lmb = 0x1 + mouse_rmb = 0x2 + mouse_mmb = 0x4 | 0x8 | 0x10 + SM_CXMIN = 28 + SM_CYMIN = 29 +) + +func (this coord) uintptr() uintptr { + return uintptr(*(*int32)(unsafe.Pointer(&this))) +} + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") +var moduser32 = syscall.NewLazyDLL("user32.dll") +var is_cjk = runewidth.IsEastAsian() + +var ( + proc_set_console_active_screen_buffer = kernel32.NewProc("SetConsoleActiveScreenBuffer") + proc_set_console_screen_buffer_size = kernel32.NewProc("SetConsoleScreenBufferSize") + proc_create_console_screen_buffer = kernel32.NewProc("CreateConsoleScreenBuffer") + proc_get_console_screen_buffer_info = kernel32.NewProc("GetConsoleScreenBufferInfo") + 
proc_write_console_output = kernel32.NewProc("WriteConsoleOutputW") + proc_write_console_output_character = kernel32.NewProc("WriteConsoleOutputCharacterW") + proc_write_console_output_attribute = kernel32.NewProc("WriteConsoleOutputAttribute") + proc_set_console_cursor_info = kernel32.NewProc("SetConsoleCursorInfo") + proc_set_console_cursor_position = kernel32.NewProc("SetConsoleCursorPosition") + proc_get_console_cursor_info = kernel32.NewProc("GetConsoleCursorInfo") + proc_read_console_input = kernel32.NewProc("ReadConsoleInputW") + proc_get_console_mode = kernel32.NewProc("GetConsoleMode") + proc_set_console_mode = kernel32.NewProc("SetConsoleMode") + proc_fill_console_output_character = kernel32.NewProc("FillConsoleOutputCharacterW") + proc_fill_console_output_attribute = kernel32.NewProc("FillConsoleOutputAttribute") + proc_create_event = kernel32.NewProc("CreateEventW") + proc_wait_for_multiple_objects = kernel32.NewProc("WaitForMultipleObjects") + proc_set_event = kernel32.NewProc("SetEvent") + proc_get_current_console_font = kernel32.NewProc("GetCurrentConsoleFont") + get_system_metrics = moduser32.NewProc("GetSystemMetrics") +) + +func set_console_active_screen_buffer(h syscall.Handle) (err error) { + r0, _, e1 := syscall.Syscall(proc_set_console_active_screen_buffer.Addr(), + 1, uintptr(h), 0, 0) + if int(r0) == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func set_console_screen_buffer_size(h syscall.Handle, size coord) (err error) { + r0, _, e1 := syscall.Syscall(proc_set_console_screen_buffer_size.Addr(), + 2, uintptr(h), size.uintptr(), 0) + if int(r0) == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func create_console_screen_buffer() (h syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall6(proc_create_console_screen_buffer.Addr(), + 5, uintptr(generic_read|generic_write), 0, 0, console_textmode_buffer, 0, 0) + if int(r0) == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return syscall.Handle(r0), err +} + +func get_console_screen_buffer_info(h syscall.Handle, info *console_screen_buffer_info) (err error) { + r0, _, e1 := syscall.Syscall(proc_get_console_screen_buffer_info.Addr(), + 2, uintptr(h), uintptr(unsafe.Pointer(info)), 0) + if int(r0) == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func write_console_output(h syscall.Handle, chars []char_info, dst small_rect) (err error) { + tmp_coord = coord{dst.right - dst.left + 1, dst.bottom - dst.top + 1} + tmp_rect = dst + r0, _, e1 := syscall.Syscall6(proc_write_console_output.Addr(), + 5, uintptr(h), uintptr(unsafe.Pointer(&chars[0])), tmp_coord.uintptr(), + tmp_coord0.uintptr(), uintptr(unsafe.Pointer(&tmp_rect)), 0) + if int(r0) == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func write_console_output_character(h syscall.Handle, chars []wchar, pos coord) (err error) { + r0, _, e1 := syscall.Syscall6(proc_write_console_output_character.Addr(), + 5, uintptr(h), uintptr(unsafe.Pointer(&chars[0])), uintptr(len(chars)), + pos.uintptr(), uintptr(unsafe.Pointer(&tmp_arg)), 0) + if int(r0) == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func write_console_output_attribute(h syscall.Handle, attrs []word, pos coord) (err error) { + r0, _, e1 := syscall.Syscall6(proc_write_console_output_attribute.Addr(), + 5, uintptr(h), 
uintptr(unsafe.Pointer(&attrs[0])), uintptr(len(attrs)), + pos.uintptr(), uintptr(unsafe.Pointer(&tmp_arg)), 0) + if int(r0) == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func set_console_cursor_info(h syscall.Handle, info *console_cursor_info) (err error) { + r0, _, e1 := syscall.Syscall(proc_set_console_cursor_info.Addr(), + 2, uintptr(h), uintptr(unsafe.Pointer(info)), 0) + if int(r0) == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func get_console_cursor_info(h syscall.Handle, info *console_cursor_info) (err error) { + r0, _, e1 := syscall.Syscall(proc_get_console_cursor_info.Addr(), + 2, uintptr(h), uintptr(unsafe.Pointer(info)), 0) + if int(r0) == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func set_console_cursor_position(h syscall.Handle, pos coord) (err error) { + r0, _, e1 := syscall.Syscall(proc_set_console_cursor_position.Addr(), + 2, uintptr(h), pos.uintptr(), 0) + if int(r0) == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func read_console_input(h syscall.Handle, record *input_record) (err error) { + r0, _, e1 := syscall.Syscall6(proc_read_console_input.Addr(), + 4, uintptr(h), uintptr(unsafe.Pointer(record)), 1, uintptr(unsafe.Pointer(&tmp_arg)), 0, 0) + if int(r0) == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func get_console_mode(h syscall.Handle, mode *dword) (err error) { + r0, _, e1 := syscall.Syscall(proc_get_console_mode.Addr(), + 2, uintptr(h), uintptr(unsafe.Pointer(mode)), 0) + if int(r0) == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func set_console_mode(h syscall.Handle, mode dword) (err error) { + r0, _, e1 := syscall.Syscall(proc_set_console_mode.Addr(), + 2, uintptr(h), uintptr(mode), 0) + if int(r0) == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func fill_console_output_character(h syscall.Handle, char wchar, n int) (err error) { + r0, _, e1 := syscall.Syscall6(proc_fill_console_output_character.Addr(), + 5, uintptr(h), uintptr(char), uintptr(n), tmp_coord.uintptr(), + uintptr(unsafe.Pointer(&tmp_arg)), 0) + if int(r0) == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func fill_console_output_attribute(h syscall.Handle, attr word, n int) (err error) { + r0, _, e1 := syscall.Syscall6(proc_fill_console_output_attribute.Addr(), + 5, uintptr(h), uintptr(attr), uintptr(n), tmp_coord.uintptr(), + uintptr(unsafe.Pointer(&tmp_arg)), 0) + if int(r0) == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func create_event() (out syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall6(proc_create_event.Addr(), + 4, 0, 0, 0, 0, 0, 0) + if int(r0) == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return syscall.Handle(r0), err +} + +func wait_for_multiple_objects(objects []syscall.Handle) (err error) { + r0, _, e1 := syscall.Syscall6(proc_wait_for_multiple_objects.Addr(), + 4, uintptr(len(objects)), uintptr(unsafe.Pointer(&objects[0])), + 0, 0xFFFFFFFF, 0, 0) + if uint32(r0) == 0xFFFFFFFF { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func set_event(ev syscall.Handle) (err error) { + r0, _, e1 := syscall.Syscall(proc_set_event.Addr(), + 1, uintptr(ev), 0, 0) 
+ if int(r0) == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func get_current_console_font(h syscall.Handle, info *console_font_info) (err error) { + r0, _, e1 := syscall.Syscall(proc_get_current_console_font.Addr(), + 3, uintptr(h), 0, uintptr(unsafe.Pointer(info))) + if int(r0) == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +type diff_msg struct { + pos short + lines short + chars []char_info +} + +type input_event struct { + event Event + err error +} + +var ( + orig_cursor_info console_cursor_info + orig_size coord + orig_mode dword + orig_screen syscall.Handle + back_buffer cellbuf + front_buffer cellbuf + term_size coord + input_mode = InputEsc + cursor_x = cursor_hidden + cursor_y = cursor_hidden + foreground = ColorDefault + background = ColorDefault + in syscall.Handle + out syscall.Handle + interrupt syscall.Handle + charbuf []char_info + diffbuf []diff_msg + beg_x = -1 + beg_y = -1 + beg_i = -1 + input_comm = make(chan Event) + interrupt_comm = make(chan struct{}) + cancel_comm = make(chan bool, 1) + cancel_done_comm = make(chan bool) + alt_mode_esc = false + + // these ones just to prevent heap allocs at all costs + tmp_info console_screen_buffer_info + tmp_arg dword + tmp_coord0 = coord{0, 0} + tmp_coord = coord{0, 0} + tmp_rect = small_rect{0, 0, 0, 0} + tmp_finfo console_font_info +) + +func get_cursor_position(out syscall.Handle) coord { + err := get_console_screen_buffer_info(out, &tmp_info) + if err != nil { + panic(err) + } + return tmp_info.cursor_position +} + +func get_term_size(out syscall.Handle) coord { + err := get_console_screen_buffer_info(out, &tmp_info) + if err != nil { + panic(err) + } + return tmp_info.size +} + +func get_win_min_size(out syscall.Handle) coord { + x, _, err := get_system_metrics.Call(SM_CXMIN) + y, _, err := get_system_metrics.Call(SM_CYMIN) + + if x == 0 || y == 0 { + if err != nil { + panic(err) + } + } + + err1 := get_current_console_font(out, &tmp_finfo) + if err1 != nil { + panic(err1) + } + + return coord{ + x: short(math.Ceil(float64(x) / float64(tmp_finfo.font_size.x))), + y: short(math.Ceil(float64(y) / float64(tmp_finfo.font_size.y))), + } +} + +func get_win_size(out syscall.Handle) coord { + err := get_console_screen_buffer_info(out, &tmp_info) + if err != nil { + panic(err) + } + + min_size := get_win_min_size(out) + + size := coord{ + x: tmp_info.window.right - tmp_info.window.left + 1, + y: tmp_info.window.bottom - tmp_info.window.top + 1, + } + + if size.x < min_size.x { + size.x = min_size.x + } + + if size.y < min_size.y { + size.y = min_size.y + } + + return size +} + +func update_size_maybe() { + size := get_win_size(out) + if size.x != term_size.x || size.y != term_size.y { + set_console_screen_buffer_size(out, size) + term_size = size + back_buffer.resize(int(size.x), int(size.y)) + front_buffer.resize(int(size.x), int(size.y)) + front_buffer.clear() + clear() + + area := int(size.x) * int(size.y) + if cap(charbuf) < area { + charbuf = make([]char_info, 0, area) + } + } +} + +var color_table_bg = []word{ + 0, // default (black) + 0, // black + background_red, + background_green, + background_red | background_green, // yellow + background_blue, + background_red | background_blue, // magenta + background_green | background_blue, // cyan + background_red | background_blue | background_green, // white +} + +var color_table_fg = []word{ + foreground_red | foreground_blue | foreground_green, // default (white) + 0, + 
foreground_red, + foreground_green, + foreground_red | foreground_green, // yellow + foreground_blue, + foreground_red | foreground_blue, // magenta + foreground_green | foreground_blue, // cyan + foreground_red | foreground_blue | foreground_green, // white +} + +const ( + replacement_char = '\uFFFD' + max_rune = '\U0010FFFF' + surr1 = 0xd800 + surr2 = 0xdc00 + surr3 = 0xe000 + surr_self = 0x10000 +) + +func append_diff_line(y int) int { + n := 0 + for x := 0; x < front_buffer.width; { + cell_offset := y*front_buffer.width + x + back := &back_buffer.cells[cell_offset] + front := &front_buffer.cells[cell_offset] + attr, char := cell_to_char_info(*back) + charbuf = append(charbuf, char_info{attr: attr, char: char[0]}) + *front = *back + n++ + w := runewidth.RuneWidth(back.Ch) + if w == 0 || w == 2 && runewidth.IsAmbiguousWidth(back.Ch) { + w = 1 + } + x += w + // If not CJK, fill trailing space with whitespace + if !is_cjk && w == 2 { + charbuf = append(charbuf, char_info{attr: attr, char: ' '}) + } + } + return n +} + +// compares 'back_buffer' with 'front_buffer' and prepares all changes in the form of +// 'diff_msg's in the 'diff_buf' +func prepare_diff_messages() { + // clear buffers + diffbuf = diffbuf[:0] + charbuf = charbuf[:0] + + var diff diff_msg + gbeg := 0 + for y := 0; y < front_buffer.height; y++ { + same := true + line_offset := y * front_buffer.width + for x := 0; x < front_buffer.width; x++ { + cell_offset := line_offset + x + back := &back_buffer.cells[cell_offset] + front := &front_buffer.cells[cell_offset] + if *back != *front { + same = false + break + } + } + if same && diff.lines > 0 { + diffbuf = append(diffbuf, diff) + diff = diff_msg{} + } + if !same { + beg := len(charbuf) + end := beg + append_diff_line(y) + if diff.lines == 0 { + diff.pos = short(y) + gbeg = beg + } + diff.lines++ + diff.chars = charbuf[gbeg:end] + } + } + if diff.lines > 0 { + diffbuf = append(diffbuf, diff) + diff = diff_msg{} + } +} + +func get_ct(table []word, idx int) word { + idx = idx & 0x0F + if idx >= len(table) { + idx = len(table) - 1 + } + return table[idx] +} + +func cell_to_char_info(c Cell) (attr word, wc [2]wchar) { + attr = get_ct(color_table_fg, int(c.Fg)) | get_ct(color_table_bg, int(c.Bg)) + if c.Fg&AttrReverse|c.Bg&AttrReverse != 0 { + attr = (attr&0xF0)>>4 | (attr&0x0F)<<4 + } + if c.Fg&AttrBold != 0 { + attr |= foreground_intensity + } + if c.Bg&AttrBold != 0 { + attr |= background_intensity + } + + r0, r1 := utf16.EncodeRune(c.Ch) + if r0 == 0xFFFD { + wc[0] = wchar(c.Ch) + wc[1] = ' ' + } else { + wc[0] = wchar(r0) + wc[1] = wchar(r1) + } + return +} + +func move_cursor(x, y int) { + err := set_console_cursor_position(out, coord{short(x), short(y)}) + if err != nil { + panic(err) + } +} + +func show_cursor(visible bool) { + var v int32 + if visible { + v = 1 + } + + var info console_cursor_info + info.size = 100 + info.visible = v + err := set_console_cursor_info(out, &info) + if err != nil { + panic(err) + } +} + +func clear() { + var err error + attr, char := cell_to_char_info(Cell{ + ' ', + foreground, + background, + }) + + area := int(term_size.x) * int(term_size.y) + err = fill_console_output_attribute(out, attr, area) + if err != nil { + panic(err) + } + err = fill_console_output_character(out, char[0], area) + if err != nil { + panic(err) + } + if !is_cursor_hidden(cursor_x, cursor_y) { + move_cursor(cursor_x, cursor_y) + } +} + +func key_event_record_to_event(r *key_event_record) (Event, bool) { + if r.key_down == 0 { + return Event{}, false + } + + e := 
Event{Type: EventKey} + if input_mode&InputAlt != 0 { + if alt_mode_esc { + e.Mod = ModAlt + alt_mode_esc = false + } + if r.control_key_state&(left_alt_pressed|right_alt_pressed) != 0 { + e.Mod = ModAlt + } + } + + ctrlpressed := r.control_key_state&(left_ctrl_pressed|right_ctrl_pressed) != 0 + + if r.virtual_key_code >= vk_f1 && r.virtual_key_code <= vk_f12 { + switch r.virtual_key_code { + case vk_f1: + e.Key = KeyF1 + case vk_f2: + e.Key = KeyF2 + case vk_f3: + e.Key = KeyF3 + case vk_f4: + e.Key = KeyF4 + case vk_f5: + e.Key = KeyF5 + case vk_f6: + e.Key = KeyF6 + case vk_f7: + e.Key = KeyF7 + case vk_f8: + e.Key = KeyF8 + case vk_f9: + e.Key = KeyF9 + case vk_f10: + e.Key = KeyF10 + case vk_f11: + e.Key = KeyF11 + case vk_f12: + e.Key = KeyF12 + default: + panic("unreachable") + } + + return e, true + } + + if r.virtual_key_code <= vk_delete { + switch r.virtual_key_code { + case vk_insert: + e.Key = KeyInsert + case vk_delete: + e.Key = KeyDelete + case vk_home: + e.Key = KeyHome + case vk_end: + e.Key = KeyEnd + case vk_pgup: + e.Key = KeyPgup + case vk_pgdn: + e.Key = KeyPgdn + case vk_arrow_up: + e.Key = KeyArrowUp + case vk_arrow_down: + e.Key = KeyArrowDown + case vk_arrow_left: + e.Key = KeyArrowLeft + case vk_arrow_right: + e.Key = KeyArrowRight + case vk_backspace: + if ctrlpressed { + e.Key = KeyBackspace2 + } else { + e.Key = KeyBackspace + } + case vk_tab: + e.Key = KeyTab + case vk_enter: + e.Key = KeyEnter + case vk_esc: + switch { + case input_mode&InputEsc != 0: + e.Key = KeyEsc + case input_mode&InputAlt != 0: + alt_mode_esc = true + return Event{}, false + } + case vk_space: + if ctrlpressed { + // manual return here, because KeyCtrlSpace is zero + e.Key = KeyCtrlSpace + return e, true + } else { + e.Key = KeySpace + } + } + + if e.Key != 0 { + return e, true + } + } + + if ctrlpressed { + if Key(r.unicode_char) >= KeyCtrlA && Key(r.unicode_char) <= KeyCtrlRsqBracket { + e.Key = Key(r.unicode_char) + if input_mode&InputAlt != 0 && e.Key == KeyEsc { + alt_mode_esc = true + return Event{}, false + } + return e, true + } + switch r.virtual_key_code { + case 192, 50: + // manual return here, because KeyCtrl2 is zero + e.Key = KeyCtrl2 + return e, true + case 51: + if input_mode&InputAlt != 0 { + alt_mode_esc = true + return Event{}, false + } + e.Key = KeyCtrl3 + case 52: + e.Key = KeyCtrl4 + case 53: + e.Key = KeyCtrl5 + case 54: + e.Key = KeyCtrl6 + case 189, 191, 55: + e.Key = KeyCtrl7 + case 8, 56: + e.Key = KeyCtrl8 + } + + if e.Key != 0 { + return e, true + } + } + + if r.unicode_char != 0 { + e.Ch = rune(r.unicode_char) + return e, true + } + + return Event{}, false +} + +func input_event_producer() { + var r input_record + var err error + var last_button Key + var last_button_pressed Key + var last_state = dword(0) + var last_x, last_y = -1, -1 + handles := []syscall.Handle{in, interrupt} + for { + err = wait_for_multiple_objects(handles) + if err != nil { + input_comm <- Event{Type: EventError, Err: err} + } + + select { + case <-cancel_comm: + cancel_done_comm <- true + return + default: + } + + err = read_console_input(in, &r) + if err != nil { + input_comm <- Event{Type: EventError, Err: err} + } + + switch r.event_type { + case key_event: + kr := (*key_event_record)(unsafe.Pointer(&r.event)) + ev, ok := key_event_record_to_event(kr) + if ok { + for i := 0; i < int(kr.repeat_count); i++ { + input_comm <- ev + } + } + case window_buffer_size_event: + sr := *(*window_buffer_size_record)(unsafe.Pointer(&r.event)) + input_comm <- Event{ + Type: EventResize, + 
Width: int(sr.size.x), + Height: int(sr.size.y), + } + case mouse_event: + mr := *(*mouse_event_record)(unsafe.Pointer(&r.event)) + ev := Event{Type: EventMouse} + switch mr.event_flags { + case 0, 2: + // single or double click + cur_state := mr.button_state + switch { + case last_state&mouse_lmb == 0 && cur_state&mouse_lmb != 0: + last_button = MouseLeft + last_button_pressed = last_button + case last_state&mouse_rmb == 0 && cur_state&mouse_rmb != 0: + last_button = MouseRight + last_button_pressed = last_button + case last_state&mouse_mmb == 0 && cur_state&mouse_mmb != 0: + last_button = MouseMiddle + last_button_pressed = last_button + case last_state&mouse_lmb != 0 && cur_state&mouse_lmb == 0: + last_button = MouseRelease + case last_state&mouse_rmb != 0 && cur_state&mouse_rmb == 0: + last_button = MouseRelease + case last_state&mouse_mmb != 0 && cur_state&mouse_mmb == 0: + last_button = MouseRelease + default: + last_state = cur_state + continue + } + last_state = cur_state + ev.Key = last_button + last_x, last_y = int(mr.mouse_pos.x), int(mr.mouse_pos.y) + ev.MouseX = last_x + ev.MouseY = last_y + case 1: + // mouse motion + x, y := int(mr.mouse_pos.x), int(mr.mouse_pos.y) + if last_state != 0 && (last_x != x || last_y != y) { + ev.Key = last_button_pressed + ev.Mod = ModMotion + ev.MouseX = x + ev.MouseY = y + last_x, last_y = x, y + } else { + ev.Type = EventNone + } + case 4: + // mouse wheel + n := int16(mr.button_state >> 16) + if n > 0 { + ev.Key = MouseWheelUp + } else { + ev.Key = MouseWheelDown + } + last_x, last_y = int(mr.mouse_pos.x), int(mr.mouse_pos.y) + ev.MouseX = last_x + ev.MouseY = last_y + default: + ev.Type = EventNone + } + if ev.Type != EventNone { + input_comm <- ev + } + } + } +} diff --git a/vendor/github.com/nsf/termbox-go/terminfo.go b/vendor/github.com/nsf/termbox-go/terminfo.go new file mode 100644 index 0000000..ab2e7a1 --- /dev/null +++ b/vendor/github.com/nsf/termbox-go/terminfo.go @@ -0,0 +1,232 @@ +// +build !windows +// This file contains a simple and incomplete implementation of the terminfo +// database. Information was taken from the ncurses manpages term(5) and +// terminfo(5). Currently, only the string capabilities for special keys and for +// functions without parameters are actually used. Colors are still done with +// ANSI escape sequences. Other special features that are not (yet?) supported +// are reading from ~/.terminfo, the TERMINFO_DIRS variable, Berkeley database +// format and extended capabilities. + +package termbox + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "io/ioutil" + "os" + "strings" +) + +const ( + ti_magic = 0432 + ti_header_length = 12 + ti_mouse_enter = "\x1b[?1000h\x1b[?1002h\x1b[?1015h\x1b[?1006h" + ti_mouse_leave = "\x1b[?1006l\x1b[?1015l\x1b[?1002l\x1b[?1000l" +) + +func load_terminfo() ([]byte, error) { + var data []byte + var err error + + term := os.Getenv("TERM") + if term == "" { + return nil, fmt.Errorf("termbox: TERM not set") + } + + // The following behaviour follows the one described in terminfo(5) as + // distributed by ncurses. 
+ + terminfo := os.Getenv("TERMINFO") + if terminfo != "" { + // if TERMINFO is set, no other directory should be searched + return ti_try_path(terminfo) + } + + // next, consider ~/.terminfo + home := os.Getenv("HOME") + if home != "" { + data, err = ti_try_path(home + "/.terminfo") + if err == nil { + return data, nil + } + } + + // next, TERMINFO_DIRS + dirs := os.Getenv("TERMINFO_DIRS") + if dirs != "" { + for _, dir := range strings.Split(dirs, ":") { + if dir == "" { + // "" -> "/usr/share/terminfo" + dir = "/usr/share/terminfo" + } + data, err = ti_try_path(dir) + if err == nil { + return data, nil + } + } + } + + // next, /lib/terminfo + data, err = ti_try_path("/lib/terminfo") + if err == nil { + return data, nil + } + + // fall back to /usr/share/terminfo + return ti_try_path("/usr/share/terminfo") +} + +func ti_try_path(path string) (data []byte, err error) { + // load_terminfo already made sure it is set + term := os.Getenv("TERM") + + // first try, the typical *nix path + terminfo := path + "/" + term[0:1] + "/" + term + data, err = ioutil.ReadFile(terminfo) + if err == nil { + return + } + + // fall back to the darwin-specific directory structure + terminfo = path + "/" + hex.EncodeToString([]byte(term[:1])) + "/" + term + data, err = ioutil.ReadFile(terminfo) + return +} + +func setup_term_builtin() error { + name := os.Getenv("TERM") + if name == "" { + return errors.New("termbox: TERM environment variable not set") + } + + for _, t := range terms { + if t.name == name { + keys = t.keys + funcs = t.funcs + return nil + } + } + + compat_table := []struct { + partial string + keys []string + funcs []string + }{ + {"xterm", xterm_keys, xterm_funcs}, + {"rxvt", rxvt_unicode_keys, rxvt_unicode_funcs}, + {"linux", linux_keys, linux_funcs}, + {"Eterm", eterm_keys, eterm_funcs}, + {"screen", screen_keys, screen_funcs}, + // let's assume that 'cygwin' is xterm compatible + {"cygwin", xterm_keys, xterm_funcs}, + {"st", xterm_keys, xterm_funcs}, + } + + // try compatibility variants + for _, it := range compat_table { + if strings.Contains(name, it.partial) { + keys = it.keys + funcs = it.funcs + return nil + } + } + + return errors.New("termbox: unsupported terminal") +} + +func setup_term() (err error) { + var data []byte + var header [6]int16 + var str_offset, table_offset int16 + + data, err = load_terminfo() + if err != nil { + return setup_term_builtin() + } + + rd := bytes.NewReader(data) + // 0: magic number, 1: size of names section, 2: size of boolean section, 3: + // size of numbers section (in integers), 4: size of the strings section (in + // integers), 5: size of the string table + + err = binary.Read(rd, binary.LittleEndian, header[:]) + if err != nil { + return + } + + number_sec_len := int16(2) + if header[0] == 542 { // the doc says it should be octal 0542, but the value seen in terminfo files is 542 + number_sec_len = 4 + } + + if (header[1]+header[2])%2 != 0 { + // old quirk to align everything on word boundaries + header[2] += 1 + } + str_offset = ti_header_length + header[1] + header[2] + number_sec_len*header[3] + table_offset = str_offset + 2*header[4] + + keys = make([]string, 0xFFFF-key_min) + for i, _ := range keys { + keys[i], err = ti_read_string(rd, str_offset+2*ti_keys[i], table_offset) + if err != nil { + return + } + } + funcs = make([]string, t_max_funcs) + // the last two entries are reserved for mouse.
because the table offset is + // not there, the two entries have to fill in manually + for i, _ := range funcs[:len(funcs)-2] { + funcs[i], err = ti_read_string(rd, str_offset+2*ti_funcs[i], table_offset) + if err != nil { + return + } + } + funcs[t_max_funcs-2] = ti_mouse_enter + funcs[t_max_funcs-1] = ti_mouse_leave + return nil +} + +func ti_read_string(rd *bytes.Reader, str_off, table int16) (string, error) { + var off int16 + + _, err := rd.Seek(int64(str_off), 0) + if err != nil { + return "", err + } + err = binary.Read(rd, binary.LittleEndian, &off) + if err != nil { + return "", err + } + _, err = rd.Seek(int64(table+off), 0) + if err != nil { + return "", err + } + var bs []byte + for { + b, err := rd.ReadByte() + if err != nil { + return "", err + } + if b == byte(0x00) { + break + } + bs = append(bs, b) + } + return string(bs), nil +} + +// "Maps" the function constants from termbox.go to the number of the respective +// string capability in the terminfo file. Taken from (ncurses) term.h. +var ti_funcs = []int16{ + 28, 40, 16, 13, 5, 39, 36, 27, 26, 34, 89, 88, +} + +// Same as above for the special keys. +var ti_keys = []int16{ + 66, 68 /* apparently not a typo; 67 is F10 for whatever reason */, 69, 70, + 71, 72, 73, 74, 75, 67, 216, 217, 77, 59, 76, 164, 82, 81, 87, 61, 79, 83, +} diff --git a/vendor/github.com/nsf/termbox-go/terminfo_builtin.go b/vendor/github.com/nsf/termbox-go/terminfo_builtin.go new file mode 100644 index 0000000..a948660 --- /dev/null +++ b/vendor/github.com/nsf/termbox-go/terminfo_builtin.go @@ -0,0 +1,64 @@ +// +build !windows + +package termbox + +// Eterm +var eterm_keys = []string{ + "\x1b[11~", "\x1b[12~", "\x1b[13~", "\x1b[14~", "\x1b[15~", "\x1b[17~", "\x1b[18~", "\x1b[19~", "\x1b[20~", "\x1b[21~", "\x1b[23~", "\x1b[24~", "\x1b[2~", "\x1b[3~", "\x1b[7~", "\x1b[8~", "\x1b[5~", "\x1b[6~", "\x1b[A", "\x1b[B", "\x1b[D", "\x1b[C", +} +var eterm_funcs = []string{ + "\x1b7\x1b[?47h", "\x1b[2J\x1b[?47l\x1b8", "\x1b[?25h", "\x1b[?25l", "\x1b[H\x1b[2J", "\x1b[m\x0f", "\x1b[4m", "\x1b[1m", "\x1b[5m", "\x1b[7m", "", "", "", "", +} + +// screen +var screen_keys = []string{ + "\x1bOP", "\x1bOQ", "\x1bOR", "\x1bOS", "\x1b[15~", "\x1b[17~", "\x1b[18~", "\x1b[19~", "\x1b[20~", "\x1b[21~", "\x1b[23~", "\x1b[24~", "\x1b[2~", "\x1b[3~", "\x1b[1~", "\x1b[4~", "\x1b[5~", "\x1b[6~", "\x1bOA", "\x1bOB", "\x1bOD", "\x1bOC", +} +var screen_funcs = []string{ + "\x1b[?1049h", "\x1b[?1049l", "\x1b[34h\x1b[?25h", "\x1b[?25l", "\x1b[H\x1b[J", "\x1b[m\x0f", "\x1b[4m", "\x1b[1m", "\x1b[5m", "\x1b[7m", "\x1b[?1h\x1b=", "\x1b[?1l\x1b>", ti_mouse_enter, ti_mouse_leave, +} + +// xterm +var xterm_keys = []string{ + "\x1bOP", "\x1bOQ", "\x1bOR", "\x1bOS", "\x1b[15~", "\x1b[17~", "\x1b[18~", "\x1b[19~", "\x1b[20~", "\x1b[21~", "\x1b[23~", "\x1b[24~", "\x1b[2~", "\x1b[3~", "\x1bOH", "\x1bOF", "\x1b[5~", "\x1b[6~", "\x1bOA", "\x1bOB", "\x1bOD", "\x1bOC", +} +var xterm_funcs = []string{ + "\x1b[?1049h", "\x1b[?1049l", "\x1b[?12l\x1b[?25h", "\x1b[?25l", "\x1b[H\x1b[2J", "\x1b(B\x1b[m", "\x1b[4m", "\x1b[1m", "\x1b[5m", "\x1b[7m", "\x1b[?1h\x1b=", "\x1b[?1l\x1b>", ti_mouse_enter, ti_mouse_leave, +} + +// rxvt-unicode +var rxvt_unicode_keys = []string{ + "\x1b[11~", "\x1b[12~", "\x1b[13~", "\x1b[14~", "\x1b[15~", "\x1b[17~", "\x1b[18~", "\x1b[19~", "\x1b[20~", "\x1b[21~", "\x1b[23~", "\x1b[24~", "\x1b[2~", "\x1b[3~", "\x1b[7~", "\x1b[8~", "\x1b[5~", "\x1b[6~", "\x1b[A", "\x1b[B", "\x1b[D", "\x1b[C", +} +var rxvt_unicode_funcs = []string{ + "\x1b[?1049h", "\x1b[r\x1b[?1049l", "\x1b[?25h", 
"\x1b[?25l", "\x1b[H\x1b[2J", "\x1b[m\x1b(B", "\x1b[4m", "\x1b[1m", "\x1b[5m", "\x1b[7m", "\x1b=", "\x1b>", ti_mouse_enter, ti_mouse_leave, +} + +// linux +var linux_keys = []string{ + "\x1b[[A", "\x1b[[B", "\x1b[[C", "\x1b[[D", "\x1b[[E", "\x1b[17~", "\x1b[18~", "\x1b[19~", "\x1b[20~", "\x1b[21~", "\x1b[23~", "\x1b[24~", "\x1b[2~", "\x1b[3~", "\x1b[1~", "\x1b[4~", "\x1b[5~", "\x1b[6~", "\x1b[A", "\x1b[B", "\x1b[D", "\x1b[C", +} +var linux_funcs = []string{ + "", "", "\x1b[?25h\x1b[?0c", "\x1b[?25l\x1b[?1c", "\x1b[H\x1b[J", "\x1b[0;10m", "\x1b[4m", "\x1b[1m", "\x1b[5m", "\x1b[7m", "", "", "", "", +} + +// rxvt-256color +var rxvt_256color_keys = []string{ + "\x1b[11~", "\x1b[12~", "\x1b[13~", "\x1b[14~", "\x1b[15~", "\x1b[17~", "\x1b[18~", "\x1b[19~", "\x1b[20~", "\x1b[21~", "\x1b[23~", "\x1b[24~", "\x1b[2~", "\x1b[3~", "\x1b[7~", "\x1b[8~", "\x1b[5~", "\x1b[6~", "\x1b[A", "\x1b[B", "\x1b[D", "\x1b[C", +} +var rxvt_256color_funcs = []string{ + "\x1b7\x1b[?47h", "\x1b[2J\x1b[?47l\x1b8", "\x1b[?25h", "\x1b[?25l", "\x1b[H\x1b[2J", "\x1b[m\x0f", "\x1b[4m", "\x1b[1m", "\x1b[5m", "\x1b[7m", "\x1b=", "\x1b>", ti_mouse_enter, ti_mouse_leave, +} + +var terms = []struct { + name string + keys []string + funcs []string +}{ + {"Eterm", eterm_keys, eterm_funcs}, + {"screen", screen_keys, screen_funcs}, + {"xterm", xterm_keys, xterm_funcs}, + {"rxvt-unicode", rxvt_unicode_keys, rxvt_unicode_funcs}, + {"linux", linux_keys, linux_funcs}, + {"rxvt-256color", rxvt_256color_keys, rxvt_256color_funcs}, +} diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/github.com/pkg/errors/.gitignore new file mode 100644 index 0000000..daf913b --- /dev/null +++ b/vendor/github.com/pkg/errors/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml new file mode 100644 index 0000000..588ceca --- /dev/null +++ b/vendor/github.com/pkg/errors/.travis.yml @@ -0,0 +1,11 @@ +language: go +go_import_path: github.com/pkg/errors +go: + - 1.4.3 + - 1.5.4 + - 1.6.2 + - 1.7.1 + - tip + +script: + - go test -v ./... diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE new file mode 100644 index 0000000..835ba3e --- /dev/null +++ b/vendor/github.com/pkg/errors/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2015, Dave Cheney +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md new file mode 100644 index 0000000..273db3c --- /dev/null +++ b/vendor/github.com/pkg/errors/README.md @@ -0,0 +1,52 @@ +# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) + +Package errors provides simple error handling primitives. + +`go get github.com/pkg/errors` + +The traditional error handling idiom in Go is roughly akin to +```go +if err != nil { + return err +} +``` +which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error. + +## Adding context to an error + +The errors.Wrap function returns a new error that adds context to the original error. For example +```go +_, err := ioutil.ReadAll(r) +if err != nil { + return errors.Wrap(err, "read failed") +} +``` +## Retrieving the cause of an error + +Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`. +```go +type causer interface { + Cause() error +} +``` +`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example: +```go +switch err := errors.Cause(err).(type) { +case *MyError: + // handle specifically +default: + // unknown error +} +``` + +[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). + +## Contributing + +We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high. + +Before proposing a change, please discuss your change by raising an issue. 
+ +## Licence + +BSD-2-Clause diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml new file mode 100644 index 0000000..a932ead --- /dev/null +++ b/vendor/github.com/pkg/errors/appveyor.yml @@ -0,0 +1,32 @@ +version: build-{build}.{branch} + +clone_folder: C:\gopath\src\github.com\pkg\errors +shallow_clone: true # for startup speed + +environment: + GOPATH: C:\gopath + +platform: + - x64 + +# http://www.appveyor.com/docs/installed-software +install: + # some helpful output for debugging builds + - go version + - go env + # pre-installed MinGW at C:\MinGW is 32bit only + # but MSYS2 at C:\msys64 has mingw64 + - set PATH=C:\msys64\mingw64\bin;%PATH% + - gcc --version + - g++ --version + +build_script: + - go install -v ./... + +test_script: + - set PATH=C:\gopath\bin;%PATH% + - go test -v ./... + +#artifacts: +# - path: '%GOPATH%\bin\*.exe' +deploy: off diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go new file mode 100644 index 0000000..842ee80 --- /dev/null +++ b/vendor/github.com/pkg/errors/errors.go @@ -0,0 +1,269 @@ +// Package errors provides simple error handling primitives. +// +// The traditional error handling idiom in Go is roughly akin to +// +// if err != nil { +// return err +// } +// +// which applied recursively up the call stack results in error reports +// without context or debugging information. The errors package allows +// programmers to add context to the failure path in their code in a way +// that does not destroy the original value of the error. +// +// Adding context to an error +// +// The errors.Wrap function returns a new error that adds context to the +// original error by recording a stack trace at the point Wrap is called, +// and the supplied message. For example +// +// _, err := ioutil.ReadAll(r) +// if err != nil { +// return errors.Wrap(err, "read failed") +// } +// +// If additional control is required the errors.WithStack and errors.WithMessage +// functions destructure errors.Wrap into its component operations of annotating +// an error with a stack trace and an a message, respectively. +// +// Retrieving the cause of an error +// +// Using errors.Wrap constructs a stack of errors, adding context to the +// preceding error. Depending on the nature of the error it may be necessary +// to reverse the operation of errors.Wrap to retrieve the original error +// for inspection. Any error value which implements this interface +// +// type causer interface { +// Cause() error +// } +// +// can be inspected by errors.Cause. errors.Cause will recursively retrieve +// the topmost error which does not implement causer, which is assumed to be +// the original cause. For example: +// +// switch err := errors.Cause(err).(type) { +// case *MyError: +// // handle specifically +// default: +// // unknown error +// } +// +// causer interface is not exported by this package, but is considered a part +// of stable public API. +// +// Formatted printing of errors +// +// All error values returned from this package implement fmt.Formatter and can +// be formatted by the fmt package. The following verbs are supported +// +// %s print the error. If the error has a Cause it will be +// printed recursively +// %v see %s +// %+v extended format. Each Frame of the error's StackTrace will +// be printed in detail. +// +// Retrieving the stack trace of an error or wrapper +// +// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are +// invoked. 
This information can be retrieved with the following interface. +// +// type stackTracer interface { +// StackTrace() errors.StackTrace +// } +// +// Where errors.StackTrace is defined as +// +// type StackTrace []Frame +// +// The Frame type represents a call site in the stack trace. Frame supports +// the fmt.Formatter interface that can be used for printing information about +// the stack trace of this error. For example: +// +// if err, ok := err.(stackTracer); ok { +// for _, f := range err.StackTrace() { +// fmt.Printf("%+s:%d", f) +// } +// } +// +// stackTracer interface is not exported by this package, but is considered a part +// of stable public API. +// +// See the documentation for Frame.Format for more details. +package errors + +import ( + "fmt" + "io" +) + +// New returns an error with the supplied message. +// New also records the stack trace at the point it was called. +func New(message string) error { + return &fundamental{ + msg: message, + stack: callers(), + } +} + +// Errorf formats according to a format specifier and returns the string +// as a value that satisfies error. +// Errorf also records the stack trace at the point it was called. +func Errorf(format string, args ...interface{}) error { + return &fundamental{ + msg: fmt.Sprintf(format, args...), + stack: callers(), + } +} + +// fundamental is an error that has a message and a stack, but no caller. +type fundamental struct { + msg string + *stack +} + +func (f *fundamental) Error() string { return f.msg } + +func (f *fundamental) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + io.WriteString(s, f.msg) + f.stack.Format(s, verb) + return + } + fallthrough + case 's': + io.WriteString(s, f.msg) + case 'q': + fmt.Fprintf(s, "%q", f.msg) + } +} + +// WithStack annotates err with a stack trace at the point WithStack was called. +// If err is nil, WithStack returns nil. +func WithStack(err error) error { + if err == nil { + return nil + } + return &withStack{ + err, + callers(), + } +} + +type withStack struct { + error + *stack +} + +func (w *withStack) Cause() error { return w.error } + +func (w *withStack) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%+v", w.Cause()) + w.stack.Format(s, verb) + return + } + fallthrough + case 's': + io.WriteString(s, w.Error()) + case 'q': + fmt.Fprintf(s, "%q", w.Error()) + } +} + +// Wrap returns an error annotating err with a stack trace +// at the point Wrap is called, and the supplied message. +// If err is nil, Wrap returns nil. +func Wrap(err error, message string) error { + if err == nil { + return nil + } + err = &withMessage{ + cause: err, + msg: message, + } + return &withStack{ + err, + callers(), + } +} + +// Wrapf returns an error annotating err with a stack trace +// at the point Wrapf is call, and the format specifier. +// If err is nil, Wrapf returns nil. +func Wrapf(err error, format string, args ...interface{}) error { + if err == nil { + return nil + } + err = &withMessage{ + cause: err, + msg: fmt.Sprintf(format, args...), + } + return &withStack{ + err, + callers(), + } +} + +// WithMessage annotates err with a new message. +// If err is nil, WithMessage returns nil. 
+func WithMessage(err error, message string) error { + if err == nil { + return nil + } + return &withMessage{ + cause: err, + msg: message, + } +} + +type withMessage struct { + cause error + msg string +} + +func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } +func (w *withMessage) Cause() error { return w.cause } + +func (w *withMessage) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%+v\n", w.Cause()) + io.WriteString(s, w.msg) + return + } + fallthrough + case 's', 'q': + io.WriteString(s, w.Error()) + } +} + +// Cause returns the underlying cause of the error, if possible. +// An error value has a cause if it implements the following +// interface: +// +// type causer interface { +// Cause() error +// } +// +// If the error does not implement Cause, the original error will +// be returned. If the error is nil, nil will be returned without further +// investigation. +func Cause(err error) error { + type causer interface { + Cause() error + } + + for err != nil { + cause, ok := err.(causer) + if !ok { + break + } + err = cause.Cause() + } + return err +} diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go new file mode 100644 index 0000000..6b1f289 --- /dev/null +++ b/vendor/github.com/pkg/errors/stack.go @@ -0,0 +1,178 @@ +package errors + +import ( + "fmt" + "io" + "path" + "runtime" + "strings" +) + +// Frame represents a program counter inside a stack frame. +type Frame uintptr + +// pc returns the program counter for this frame; +// multiple frames may have the same PC value. +func (f Frame) pc() uintptr { return uintptr(f) - 1 } + +// file returns the full path to the file that contains the +// function for this Frame's pc. +func (f Frame) file() string { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return "unknown" + } + file, _ := fn.FileLine(f.pc()) + return file +} + +// line returns the line number of source code of the +// function for this Frame's pc. +func (f Frame) line() int { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return 0 + } + _, line := fn.FileLine(f.pc()) + return line +} + +// Format formats the frame according to the fmt.Formatter interface. +// +// %s source file +// %d source line +// %n function name +// %v equivalent to %s:%d +// +// Format accepts flags that alter the printing of some verbs, as follows: +// +// %+s path of source file relative to the compile time GOPATH +// %+v equivalent to %+s:%d +func (f Frame) Format(s fmt.State, verb rune) { + switch verb { + case 's': + switch { + case s.Flag('+'): + pc := f.pc() + fn := runtime.FuncForPC(pc) + if fn == nil { + io.WriteString(s, "unknown") + } else { + file, _ := fn.FileLine(pc) + fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file) + } + default: + io.WriteString(s, path.Base(f.file())) + } + case 'd': + fmt.Fprintf(s, "%d", f.line()) + case 'n': + name := runtime.FuncForPC(f.pc()).Name() + io.WriteString(s, funcname(name)) + case 'v': + f.Format(s, 's') + io.WriteString(s, ":") + f.Format(s, 'd') + } +} + +// StackTrace is stack of Frames from innermost (newest) to outermost (oldest). 
+type StackTrace []Frame + +func (st StackTrace) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + switch { + case s.Flag('+'): + for _, f := range st { + fmt.Fprintf(s, "\n%+v", f) + } + case s.Flag('#'): + fmt.Fprintf(s, "%#v", []Frame(st)) + default: + fmt.Fprintf(s, "%v", []Frame(st)) + } + case 's': + fmt.Fprintf(s, "%s", []Frame(st)) + } +} + +// stack represents a stack of program counters. +type stack []uintptr + +func (s *stack) Format(st fmt.State, verb rune) { + switch verb { + case 'v': + switch { + case st.Flag('+'): + for _, pc := range *s { + f := Frame(pc) + fmt.Fprintf(st, "\n%+v", f) + } + } + } +} + +func (s *stack) StackTrace() StackTrace { + f := make([]Frame, len(*s)) + for i := 0; i < len(f); i++ { + f[i] = Frame((*s)[i]) + } + return f +} + +func callers() *stack { + const depth = 32 + var pcs [depth]uintptr + n := runtime.Callers(3, pcs[:]) + var st stack = pcs[0:n] + return &st +} + +// funcname removes the path prefix component of a function's name reported by func.Name(). +func funcname(name string) string { + i := strings.LastIndex(name, "/") + name = name[i+1:] + i = strings.Index(name, ".") + return name[i+1:] +} + +func trimGOPATH(name, file string) string { + // Here we want to get the source file path relative to the compile time + // GOPATH. As of Go 1.6.x there is no direct way to know the compiled + // GOPATH at runtime, but we can infer the number of path segments in the + // GOPATH. We note that fn.Name() returns the function name qualified by + // the import path, which does not include the GOPATH. Thus we can trim + // segments from the beginning of the file path until the number of path + // separators remaining is one more than the number of path separators in + // the function name. For example, given: + // + // GOPATH /home/user + // file /home/user/src/pkg/sub/file.go + // fn.Name() pkg/sub.Type.Method + // + // We want to produce: + // + // pkg/sub/file.go + // + // From this we can easily see that fn.Name() has one less path separator + // than our desired output. We count separators from the end of the file + // path until it finds two more than in the function name and then move + // one character forward to preserve the initial path segment without a + // leading separator. + const sep = "/" + goal := strings.Count(name, sep) + 2 + i := len(file) + for n := 0; n < goal; n++ { + i = strings.LastIndex(file[:i], sep) + if i == -1 { + // not enough separators found, set i so that the slice expression + // below leaves file unmodified + i = -len(sep) + break + } + } + // get back to 0 or trim the leading separator + file = file[i+len(sep):] + return file +} diff --git a/vendor/github.com/valyala/bytebufferpool/.travis.yml b/vendor/github.com/valyala/bytebufferpool/.travis.yml new file mode 100644 index 0000000..6a6ec2e --- /dev/null +++ b/vendor/github.com/valyala/bytebufferpool/.travis.yml @@ -0,0 +1,15 @@ +language: go + +go: + - 1.6 + +script: + # build test for supported platforms + - GOOS=linux go build + - GOOS=darwin go build + - GOOS=freebsd go build + - GOOS=windows go build + - GOARCH=386 go build + + # run tests on a standard platform + - go test -v ./... 
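For the `Frame` and `StackTrace` formatting defined in `stack.go` above, a short sketch of the `stackTracer` assertion that the package documents but deliberately does not export; the local interface declaration mirrors the one shown in the `errors.go` comments.

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// stackTracer mirrors the interface documented (but not exported) by pkg/errors.
type stackTracer interface {
	StackTrace() errors.StackTrace
}

func main() {
	err := errors.New("boom")
	if st, ok := err.(stackTracer); ok {
		// Each frame prints as "function\n\tfile:line" via Frame.Format.
		for _, f := range st.StackTrace() {
			fmt.Printf("%+s:%d\n", f, f)
		}
	}
}
```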
diff --git a/vendor/github.com/valyala/bytebufferpool/LICENSE b/vendor/github.com/valyala/bytebufferpool/LICENSE new file mode 100644 index 0000000..f7c935c --- /dev/null +++ b/vendor/github.com/valyala/bytebufferpool/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2016 Aliaksandr Valialkin, VertaMedia + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/valyala/bytebufferpool/README.md b/vendor/github.com/valyala/bytebufferpool/README.md new file mode 100644 index 0000000..061357e --- /dev/null +++ b/vendor/github.com/valyala/bytebufferpool/README.md @@ -0,0 +1,21 @@ +[![Build Status](https://travis-ci.org/valyala/bytebufferpool.svg)](https://travis-ci.org/valyala/bytebufferpool) +[![GoDoc](https://godoc.org/github.com/valyala/bytebufferpool?status.svg)](http://godoc.org/github.com/valyala/bytebufferpool) +[![Go Report](http://goreportcard.com/badge/valyala/bytebufferpool)](http://goreportcard.com/report/valyala/bytebufferpool) + +# bytebufferpool + +An implementation of a pool of byte buffers with anti-memory-waste protection. + +The pool may waste limited amount of memory due to fragmentation. +This amount equals to the maximum total size of the byte buffers +in concurrent use. + +# Benchmark results +Currently bytebufferpool is fastest and most effective buffer pool written in Go. + +You can find results [here](https://omgnull.github.io/go-benchmark/buffer/). + +# bytebufferpool users + +* [fasthttp](https://github.com/valyala/fasthttp) +* [quicktemplate](https://github.com/valyala/quicktemplate) diff --git a/vendor/github.com/valyala/bytebufferpool/bytebuffer.go b/vendor/github.com/valyala/bytebufferpool/bytebuffer.go new file mode 100644 index 0000000..07a055a --- /dev/null +++ b/vendor/github.com/valyala/bytebufferpool/bytebuffer.go @@ -0,0 +1,111 @@ +package bytebufferpool + +import "io" + +// ByteBuffer provides byte buffer, which can be used for minimizing +// memory allocations. +// +// ByteBuffer may be used with functions appending data to the given []byte +// slice. See example code for details. +// +// Use Get for obtaining an empty byte buffer. +type ByteBuffer struct { + + // B is a byte buffer to use in append-like workloads. + // See example code for details. + B []byte +} + +// Len returns the size of the byte buffer. +func (b *ByteBuffer) Len() int { + return len(b.B) +} + +// ReadFrom implements io.ReaderFrom. +// +// The function appends all the data read from r to b. 
+func (b *ByteBuffer) ReadFrom(r io.Reader) (int64, error) { + p := b.B + nStart := int64(len(p)) + nMax := int64(cap(p)) + n := nStart + if nMax == 0 { + nMax = 64 + p = make([]byte, nMax) + } else { + p = p[:nMax] + } + for { + if n == nMax { + nMax *= 2 + bNew := make([]byte, nMax) + copy(bNew, p) + p = bNew + } + nn, err := r.Read(p[n:]) + n += int64(nn) + if err != nil { + b.B = p[:n] + n -= nStart + if err == io.EOF { + return n, nil + } + return n, err + } + } +} + +// WriteTo implements io.WriterTo. +func (b *ByteBuffer) WriteTo(w io.Writer) (int64, error) { + n, err := w.Write(b.B) + return int64(n), err +} + +// Bytes returns b.B, i.e. all the bytes accumulated in the buffer. +// +// The purpose of this function is bytes.Buffer compatibility. +func (b *ByteBuffer) Bytes() []byte { + return b.B +} + +// Write implements io.Writer - it appends p to ByteBuffer.B +func (b *ByteBuffer) Write(p []byte) (int, error) { + b.B = append(b.B, p...) + return len(p), nil +} + +// WriteByte appends the byte c to the buffer. +// +// The purpose of this function is bytes.Buffer compatibility. +// +// The function always returns nil. +func (b *ByteBuffer) WriteByte(c byte) error { + b.B = append(b.B, c) + return nil +} + +// WriteString appends s to ByteBuffer.B. +func (b *ByteBuffer) WriteString(s string) (int, error) { + b.B = append(b.B, s...) + return len(s), nil +} + +// Set sets ByteBuffer.B to p. +func (b *ByteBuffer) Set(p []byte) { + b.B = append(b.B[:0], p...) +} + +// SetString sets ByteBuffer.B to s. +func (b *ByteBuffer) SetString(s string) { + b.B = append(b.B[:0], s...) +} + +// String returns string representation of ByteBuffer.B. +func (b *ByteBuffer) String() string { + return string(b.B) +} + +// Reset makes ByteBuffer.B empty. +func (b *ByteBuffer) Reset() { + b.B = b.B[:0] +} diff --git a/vendor/github.com/valyala/bytebufferpool/doc.go b/vendor/github.com/valyala/bytebufferpool/doc.go new file mode 100644 index 0000000..e511b7c --- /dev/null +++ b/vendor/github.com/valyala/bytebufferpool/doc.go @@ -0,0 +1,7 @@ +// Package bytebufferpool implements a pool of byte buffers +// with anti-fragmentation protection. +// +// The pool may waste limited amount of memory due to fragmentation. +// This amount equals to the maximum total size of the byte buffers +// in concurrent use. +package bytebufferpool diff --git a/vendor/github.com/valyala/bytebufferpool/pool.go b/vendor/github.com/valyala/bytebufferpool/pool.go new file mode 100644 index 0000000..8bb4134 --- /dev/null +++ b/vendor/github.com/valyala/bytebufferpool/pool.go @@ -0,0 +1,151 @@ +package bytebufferpool + +import ( + "sort" + "sync" + "sync/atomic" +) + +const ( + minBitSize = 6 // 2**6=64 is a CPU cache line size + steps = 20 + + minSize = 1 << minBitSize + maxSize = 1 << (minBitSize + steps - 1) + + calibrateCallsThreshold = 42000 + maxPercentile = 0.95 +) + +// Pool represents byte buffer pool. +// +// Distinct pools may be used for distinct types of byte buffers. +// Properly determined byte buffer types with their own pools may help reducing +// memory waste. +type Pool struct { + calls [steps]uint64 + calibrating uint64 + + defaultSize uint64 + maxSize uint64 + + pool sync.Pool +} + +var defaultPool Pool + +// Get returns an empty byte buffer from the pool. +// +// Got byte buffer may be returned to the pool via Put call. +// This reduces the number of memory allocations required for byte buffer +// management. +func Get() *ByteBuffer { return defaultPool.Get() } + +// Get returns new byte buffer with zero length. 
+// +// The byte buffer may be returned to the pool via Put after the use +// in order to minimize GC overhead. +func (p *Pool) Get() *ByteBuffer { + v := p.pool.Get() + if v != nil { + return v.(*ByteBuffer) + } + return &ByteBuffer{ + B: make([]byte, 0, atomic.LoadUint64(&p.defaultSize)), + } +} + +// Put returns byte buffer to the pool. +// +// ByteBuffer.B mustn't be touched after returning it to the pool. +// Otherwise data races will occur. +func Put(b *ByteBuffer) { defaultPool.Put(b) } + +// Put releases byte buffer obtained via Get to the pool. +// +// The buffer mustn't be accessed after returning to the pool. +func (p *Pool) Put(b *ByteBuffer) { + idx := index(len(b.B)) + + if atomic.AddUint64(&p.calls[idx], 1) > calibrateCallsThreshold { + p.calibrate() + } + + maxSize := int(atomic.LoadUint64(&p.maxSize)) + if maxSize == 0 || cap(b.B) <= maxSize { + b.Reset() + p.pool.Put(b) + } +} + +func (p *Pool) calibrate() { + if !atomic.CompareAndSwapUint64(&p.calibrating, 0, 1) { + return + } + + a := make(callSizes, 0, steps) + var callsSum uint64 + for i := uint64(0); i < steps; i++ { + calls := atomic.SwapUint64(&p.calls[i], 0) + callsSum += calls + a = append(a, callSize{ + calls: calls, + size: minSize << i, + }) + } + sort.Sort(a) + + defaultSize := a[0].size + maxSize := defaultSize + + maxSum := uint64(float64(callsSum) * maxPercentile) + callsSum = 0 + for i := 0; i < steps; i++ { + if callsSum > maxSum { + break + } + callsSum += a[i].calls + size := a[i].size + if size > maxSize { + maxSize = size + } + } + + atomic.StoreUint64(&p.defaultSize, defaultSize) + atomic.StoreUint64(&p.maxSize, maxSize) + + atomic.StoreUint64(&p.calibrating, 0) +} + +type callSize struct { + calls uint64 + size uint64 +} + +type callSizes []callSize + +func (ci callSizes) Len() int { + return len(ci) +} + +func (ci callSizes) Less(i, j int) bool { + return ci[i].calls > ci[j].calls +} + +func (ci callSizes) Swap(i, j int) { + ci[i], ci[j] = ci[j], ci[i] +} + +func index(n int) int { + n-- + n >>= minBitSize + idx := 0 + for n > 0 { + n >>= 1 + idx++ + } + if idx >= steps { + idx = steps - 1 + } + return idx +} diff --git a/vendor/github.com/valyala/fasttemplate/LICENSE b/vendor/github.com/valyala/fasttemplate/LICENSE new file mode 100644 index 0000000..7125a63 --- /dev/null +++ b/vendor/github.com/valyala/fasttemplate/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Aliaksandr Valialkin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
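Before the fasttemplate sources below, a minimal usage sketch for the `bytebufferpool` package vendored above, which its README stops short of showing. It follows the documented contract: `Get` an empty buffer, append via its `B` field or the `Write*` helpers, and `Put` it back once the contents are no longer referenced. The strings written here are arbitrary examples.

```go
package main

import (
	"fmt"

	"github.com/valyala/bytebufferpool"
)

func main() {
	// Obtain an empty buffer from the package-level pool.
	bb := bytebufferpool.Get()

	// Append via the io.Writer-style helpers or directly to bb.B.
	bb.WriteString("hello, ")
	bb.B = append(bb.B, "bytebufferpool"...)

	fmt.Println(bb.String()) // hello, bytebufferpool

	// Return the buffer; it must not be used after Put.
	bytebufferpool.Put(bb)
}
```

The calibration logic in `pool.go` decides, based on observed buffer sizes, which returned buffers are actually kept, so callers only need to pair each `Get` with a `Put`.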
+ diff --git a/vendor/github.com/valyala/fasttemplate/README.md b/vendor/github.com/valyala/fasttemplate/README.md new file mode 100644 index 0000000..3a4d56c --- /dev/null +++ b/vendor/github.com/valyala/fasttemplate/README.md @@ -0,0 +1,85 @@ +fasttemplate +============ + +Simple and fast template engine for Go. + +Fasttemplate peforms only a single task - it substitutes template placeholders +with user-defined values. At high speed :) + +Take a look at [quicktemplate](https://github.com/valyala/quicktemplate) if you need fast yet powerful html template engine. + +*Please note that fasttemplate doesn't do any escaping on template values +unlike [html/template](http://golang.org/pkg/html/template/) do. So values +must be properly escaped before passing them to fasttemplate.* + +Fasttemplate is faster than [text/template](http://golang.org/pkg/text/template/), +[strings.Replace](http://golang.org/pkg/strings/#Replace), +[strings.Replacer](http://golang.org/pkg/strings/#Replacer) +and [fmt.Fprintf](https://golang.org/pkg/fmt/#Fprintf) on placeholders' substitution. + +Below are benchmark results comparing fasttemplate performance to text/template, +strings.Replace, strings.Replacer and fmt.Fprintf: + +``` +$ go test -bench=. -benchmem +PASS +BenchmarkFmtFprintf-4 2000000 790 ns/op 0 B/op 0 allocs/op +BenchmarkStringsReplace-4 500000 3474 ns/op 2112 B/op 14 allocs/op +BenchmarkStringsReplacer-4 500000 2657 ns/op 2256 B/op 23 allocs/op +BenchmarkTextTemplate-4 500000 3333 ns/op 336 B/op 19 allocs/op +BenchmarkFastTemplateExecuteFunc-4 5000000 349 ns/op 0 B/op 0 allocs/op +BenchmarkFastTemplateExecute-4 3000000 383 ns/op 0 B/op 0 allocs/op +BenchmarkFastTemplateExecuteFuncString-4 3000000 549 ns/op 144 B/op 1 allocs/op +BenchmarkFastTemplateExecuteString-4 3000000 572 ns/op 144 B/op 1 allocs/op +BenchmarkFastTemplateExecuteTagFunc-4 2000000 743 ns/op 144 B/op 3 allocs/op +``` + + +Docs +==== + +See http://godoc.org/github.com/valyala/fasttemplate . + + +Usage +===== + +```go + template := "http://{{host}}/?q={{query}}&foo={{bar}}{{bar}}" + t := fasttemplate.New(template, "{{", "}}") + s := t.ExecuteString(map[string]interface{}{ + "host": "google.com", + "query": url.QueryEscape("hello=world"), + "bar": "foobar", + }) + fmt.Printf("%s", s) + + // Output: + // http://google.com/?q=hello%3Dworld&foo=foobarfoobar +``` + + +Advanced usage +============== + +```go + template := "Hello, [user]! You won [prize]!!! [foobar]" + t, err := fasttemplate.NewTemplate(template, "[", "]") + if err != nil { + log.Fatalf("unexpected error when parsing template: %s", err) + } + s := t.ExecuteFuncString(func(w io.Writer, tag string) (int, error) { + switch tag { + case "user": + return w.Write([]byte("John")) + case "prize": + return w.Write([]byte("$100500")) + default: + return w.Write([]byte(fmt.Sprintf("[unknown tag %q]", tag))) + } + }) + fmt.Printf("%s", s) + + // Output: + // Hello, John! You won $100500!!! [unknown tag "foobar"] +``` diff --git a/vendor/github.com/valyala/fasttemplate/template.go b/vendor/github.com/valyala/fasttemplate/template.go new file mode 100644 index 0000000..9120920 --- /dev/null +++ b/vendor/github.com/valyala/fasttemplate/template.go @@ -0,0 +1,317 @@ +// Package fasttemplate implements simple and fast template library. +// +// Fasttemplate is faster than text/template, strings.Replace +// and strings.Replacer. +// +// Fasttemplate ideally fits for fast and simple placeholders' substitutions. 
+package fasttemplate + +import ( + "bytes" + "fmt" + "github.com/valyala/bytebufferpool" + "io" +) + +// ExecuteFunc calls f on each template tag (placeholder) occurrence. +// +// Returns the number of bytes written to w. +// +// This function is optimized for constantly changing templates. +// Use Template.ExecuteFunc for frozen templates. +func ExecuteFunc(template, startTag, endTag string, w io.Writer, f TagFunc) (int64, error) { + s := unsafeString2Bytes(template) + a := unsafeString2Bytes(startTag) + b := unsafeString2Bytes(endTag) + + var nn int64 + var ni int + var err error + for { + n := bytes.Index(s, a) + if n < 0 { + break + } + ni, err = w.Write(s[:n]) + nn += int64(ni) + if err != nil { + return nn, err + } + + s = s[n+len(a):] + n = bytes.Index(s, b) + if n < 0 { + // cannot find end tag - just write it to the output. + ni, _ = w.Write(a) + nn += int64(ni) + break + } + + ni, err = f(w, unsafeBytes2String(s[:n])) + nn += int64(ni) + s = s[n+len(b):] + } + ni, err = w.Write(s) + nn += int64(ni) + + return nn, err +} + +// Execute substitutes template tags (placeholders) with the corresponding +// values from the map m and writes the result to the given writer w. +// +// Substitution map m may contain values with the following types: +// * []byte - the fastest value type +// * string - convenient value type +// * TagFunc - flexible value type +// +// Returns the number of bytes written to w. +// +// This function is optimized for constantly changing templates. +// Use Template.Execute for frozen templates. +func Execute(template, startTag, endTag string, w io.Writer, m map[string]interface{}) (int64, error) { + return ExecuteFunc(template, startTag, endTag, w, func(w io.Writer, tag string) (int, error) { return stdTagFunc(w, tag, m) }) +} + +// ExecuteFuncString calls f on each template tag (placeholder) occurrence +// and substitutes it with the data written to TagFunc's w. +// +// Returns the resulting string. +// +// This function is optimized for constantly changing templates. +// Use Template.ExecuteFuncString for frozen templates. +func ExecuteFuncString(template, startTag, endTag string, f TagFunc) string { + tagsCount := bytes.Count(unsafeString2Bytes(template), unsafeString2Bytes(startTag)) + if tagsCount == 0 { + return template + } + + bb := byteBufferPool.Get() + if _, err := ExecuteFunc(template, startTag, endTag, bb, f); err != nil { + panic(fmt.Sprintf("unexpected error: %s", err)) + } + s := string(bb.B) + bb.Reset() + byteBufferPool.Put(bb) + return s +} + +var byteBufferPool bytebufferpool.Pool + +// ExecuteString substitutes template tags (placeholders) with the corresponding +// values from the map m and returns the result. +// +// Substitution map m may contain values with the following types: +// * []byte - the fastest value type +// * string - convenient value type +// * TagFunc - flexible value type +// +// This function is optimized for constantly changing templates. +// Use Template.ExecuteString for frozen templates. +func ExecuteString(template, startTag, endTag string, m map[string]interface{}) string { + return ExecuteFuncString(template, startTag, endTag, func(w io.Writer, tag string) (int, error) { return stdTagFunc(w, tag, m) }) +} + +// Template implements simple template engine, which can be used for fast +// tags' (aka placeholders) substitution. 
+type Template struct { + template string + startTag string + endTag string + + texts [][]byte + tags []string + byteBufferPool bytebufferpool.Pool +} + +// New parses the given template using the given startTag and endTag +// as tag start and tag end. +// +// The returned template can be executed by concurrently running goroutines +// using Execute* methods. +// +// New panics if the given template cannot be parsed. Use NewTemplate instead +// if template may contain errors. +func New(template, startTag, endTag string) *Template { + t, err := NewTemplate(template, startTag, endTag) + if err != nil { + panic(err) + } + return t +} + +// NewTemplate parses the given template using the given startTag and endTag +// as tag start and tag end. +// +// The returned template can be executed by concurrently running goroutines +// using Execute* methods. +func NewTemplate(template, startTag, endTag string) (*Template, error) { + var t Template + err := t.Reset(template, startTag, endTag) + if err != nil { + return nil, err + } + return &t, nil +} + +// TagFunc can be used as a substitution value in the map passed to Execute*. +// Execute* functions pass tag (placeholder) name in 'tag' argument. +// +// TagFunc must be safe to call from concurrently running goroutines. +// +// TagFunc must write contents to w and return the number of bytes written. +type TagFunc func(w io.Writer, tag string) (int, error) + +// Reset resets the template t to new one defined by +// template, startTag and endTag. +// +// Reset allows Template object re-use. +// +// Reset may be called only if no other goroutines call t methods at the moment. +func (t *Template) Reset(template, startTag, endTag string) error { + // Keep these vars in t, so GC won't collect them and won't break + // vars derived via unsafe* + t.template = template + t.startTag = startTag + t.endTag = endTag + t.texts = t.texts[:0] + t.tags = t.tags[:0] + + if len(startTag) == 0 { + panic("startTag cannot be empty") + } + if len(endTag) == 0 { + panic("endTag cannot be empty") + } + + s := unsafeString2Bytes(template) + a := unsafeString2Bytes(startTag) + b := unsafeString2Bytes(endTag) + + tagsCount := bytes.Count(s, a) + if tagsCount == 0 { + return nil + } + + if tagsCount+1 > cap(t.texts) { + t.texts = make([][]byte, 0, tagsCount+1) + } + if tagsCount > cap(t.tags) { + t.tags = make([]string, 0, tagsCount) + } + + for { + n := bytes.Index(s, a) + if n < 0 { + t.texts = append(t.texts, s) + break + } + t.texts = append(t.texts, s[:n]) + + s = s[n+len(a):] + n = bytes.Index(s, b) + if n < 0 { + return fmt.Errorf("Cannot find end tag=%q in the template=%q starting from %q", endTag, template, s) + } + + t.tags = append(t.tags, unsafeBytes2String(s[:n])) + s = s[n+len(b):] + } + + return nil +} + +// ExecuteFunc calls f on each template tag (placeholder) occurrence. +// +// Returns the number of bytes written to w. +// +// This function is optimized for frozen templates. +// Use ExecuteFunc for constantly changing templates. 
+func (t *Template) ExecuteFunc(w io.Writer, f TagFunc) (int64, error) { + var nn int64 + + n := len(t.texts) - 1 + if n == -1 { + ni, err := w.Write(unsafeString2Bytes(t.template)) + return int64(ni), err + } + + for i := 0; i < n; i++ { + ni, err := w.Write(t.texts[i]) + nn += int64(ni) + if err != nil { + return nn, err + } + + ni, err = f(w, t.tags[i]) + nn += int64(ni) + if err != nil { + return nn, err + } + } + ni, err := w.Write(t.texts[n]) + nn += int64(ni) + return nn, err +} + +// Execute substitutes template tags (placeholders) with the corresponding +// values from the map m and writes the result to the given writer w. +// +// Substitution map m may contain values with the following types: +// * []byte - the fastest value type +// * string - convenient value type +// * TagFunc - flexible value type +// +// Returns the number of bytes written to w. +func (t *Template) Execute(w io.Writer, m map[string]interface{}) (int64, error) { + return t.ExecuteFunc(w, func(w io.Writer, tag string) (int, error) { return stdTagFunc(w, tag, m) }) +} + +// ExecuteFuncString calls f on each template tag (placeholder) occurrence +// and substitutes it with the data written to TagFunc's w. +// +// Returns the resulting string. +// +// This function is optimized for frozen templates. +// Use ExecuteFuncString for constantly changing templates. +func (t *Template) ExecuteFuncString(f TagFunc) string { + bb := t.byteBufferPool.Get() + if _, err := t.ExecuteFunc(bb, f); err != nil { + panic(fmt.Sprintf("unexpected error: %s", err)) + } + s := string(bb.Bytes()) + bb.Reset() + t.byteBufferPool.Put(bb) + return s +} + +// ExecuteString substitutes template tags (placeholders) with the corresponding +// values from the map m and returns the result. +// +// Substitution map m may contain values with the following types: +// * []byte - the fastest value type +// * string - convenient value type +// * TagFunc - flexible value type +// +// This function is optimized for frozen templates. +// Use ExecuteString for constantly changing templates. +func (t *Template) ExecuteString(m map[string]interface{}) string { + return t.ExecuteFuncString(func(w io.Writer, tag string) (int, error) { return stdTagFunc(w, tag, m) }) +} + +func stdTagFunc(w io.Writer, tag string, m map[string]interface{}) (int, error) { + v := m[tag] + if v == nil { + return 0, nil + } + switch value := v.(type) { + case []byte: + return w.Write(value) + case string: + return w.Write([]byte(value)) + case TagFunc: + return value(w, tag) + default: + panic(fmt.Sprintf("tag=%q contains unexpected value type=%#v. 
Expected []byte, string or TagFunc", tag, v)) + } +} diff --git a/vendor/github.com/valyala/fasttemplate/unsafe.go b/vendor/github.com/valyala/fasttemplate/unsafe.go new file mode 100644 index 0000000..0498248 --- /dev/null +++ b/vendor/github.com/valyala/fasttemplate/unsafe.go @@ -0,0 +1,22 @@ +// +build !appengine + +package fasttemplate + +import ( + "reflect" + "unsafe" +) + +func unsafeBytes2String(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +func unsafeString2Bytes(s string) []byte { + sh := (*reflect.StringHeader)(unsafe.Pointer(&s)) + bh := reflect.SliceHeader{ + Data: sh.Data, + Len: sh.Len, + Cap: sh.Len, + } + return *(*[]byte)(unsafe.Pointer(&bh)) +} diff --git a/vendor/github.com/valyala/fasttemplate/unsafe_gae.go b/vendor/github.com/valyala/fasttemplate/unsafe_gae.go new file mode 100644 index 0000000..cc4ce15 --- /dev/null +++ b/vendor/github.com/valyala/fasttemplate/unsafe_gae.go @@ -0,0 +1,11 @@ +// +build appengine + +package fasttemplate + +func unsafeBytes2String(b []byte) string { + return string(b) +} + +func unsafeString2Bytes(s string) []byte { + return []byte(s) +} diff --git a/vendor/golang.org/x/crypto/AUTHORS b/vendor/golang.org/x/crypto/AUTHORS new file mode 100644 index 0000000..2b00ddb --- /dev/null +++ b/vendor/golang.org/x/crypto/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at https://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/crypto/CONTRIBUTORS b/vendor/golang.org/x/crypto/CONTRIBUTORS new file mode 100644 index 0000000..1fbd3e9 --- /dev/null +++ b/vendor/golang.org/x/crypto/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE new file mode 100644 index 0000000..6a66aea --- /dev/null +++ b/vendor/golang.org/x/crypto/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS new file mode 100644 index 0000000..7330990 --- /dev/null +++ b/vendor/golang.org/x/crypto/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/crypto/acme/acme.go b/vendor/golang.org/x/crypto/acme/acme.go new file mode 100644 index 0000000..7df6476 --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/acme.go @@ -0,0 +1,922 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package acme provides an implementation of the +// Automatic Certificate Management Environment (ACME) spec. +// See https://tools.ietf.org/html/draft-ietf-acme-acme-02 for details. +// +// Most common scenarios will want to use autocert subdirectory instead, +// which provides automatic access to certificates from Let's Encrypt +// and any other ACME-based CA. +// +// This package is a work in progress and makes no API stability promises. +package acme + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/sha256" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/base64" + "encoding/hex" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "io/ioutil" + "math/big" + "net/http" + "strings" + "sync" + "time" +) + +const ( + // LetsEncryptURL is the Directory endpoint of Let's Encrypt CA. + LetsEncryptURL = "https://acme-v01.api.letsencrypt.org/directory" + + // ALPNProto is the ALPN protocol name used by a CA server when validating + // tls-alpn-01 challenges. 
+ // + // Package users must ensure their servers can negotiate the ACME ALPN in + // order for tls-alpn-01 challenge verifications to succeed. + // See the crypto/tls package's Config.NextProtos field. + ALPNProto = "acme-tls/1" +) + +// idPeACMEIdentifierV1 is the OID for the ACME extension for the TLS-ALPN challenge. +var idPeACMEIdentifierV1 = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 30, 1} + +const ( + maxChainLen = 5 // max depth and breadth of a certificate chain + maxCertSize = 1 << 20 // max size of a certificate, in bytes + + // Max number of collected nonces kept in memory. + // Expect usual peak of 1 or 2. + maxNonces = 100 +) + +// Client is an ACME client. +// The only required field is Key. An example of creating a client with a new key +// is as follows: +// +// key, err := rsa.GenerateKey(rand.Reader, 2048) +// if err != nil { +// log.Fatal(err) +// } +// client := &Client{Key: key} +// +type Client struct { + // Key is the account key used to register with a CA and sign requests. + // Key.Public() must return a *rsa.PublicKey or *ecdsa.PublicKey. + Key crypto.Signer + + // HTTPClient optionally specifies an HTTP client to use + // instead of http.DefaultClient. + HTTPClient *http.Client + + // DirectoryURL points to the CA directory endpoint. + // If empty, LetsEncryptURL is used. + // Mutating this value after a successful call of Client's Discover method + // will have no effect. + DirectoryURL string + + // RetryBackoff computes the duration after which the nth retry of a failed request + // should occur. The value of n for the first call on failure is 1. + // The values of r and resp are the request and response of the last failed attempt. + // If the returned value is negative or zero, no more retries are done and an error + // is returned to the caller of the original method. + // + // Requests which result in a 4xx client error are not retried, + // except for 400 Bad Request due to "bad nonce" errors and 429 Too Many Requests. + // + // If RetryBackoff is nil, a truncated exponential backoff algorithm + // with the ceiling of 10 seconds is used, where each subsequent retry n + // is done after either ("Retry-After" + jitter) or (2^n seconds + jitter), + // preferring the former if "Retry-After" header is found in the resp. + // The jitter is a random value up to 1 second. + RetryBackoff func(n int, r *http.Request, resp *http.Response) time.Duration + + dirMu sync.Mutex // guards writes to dir + dir *Directory // cached result of Client's Discover method + + noncesMu sync.Mutex + nonces map[string]struct{} // nonces collected from previous responses +} + +// Discover performs ACME server discovery using c.DirectoryURL. +// +// It caches successful result. So, subsequent calls will not result in +// a network round-trip. This also means mutating c.DirectoryURL after successful call +// of this method will have no effect. 
+func (c *Client) Discover(ctx context.Context) (Directory, error) { + c.dirMu.Lock() + defer c.dirMu.Unlock() + if c.dir != nil { + return *c.dir, nil + } + + dirURL := c.DirectoryURL + if dirURL == "" { + dirURL = LetsEncryptURL + } + res, err := c.get(ctx, dirURL, wantStatus(http.StatusOK)) + if err != nil { + return Directory{}, err + } + defer res.Body.Close() + c.addNonce(res.Header) + + var v struct { + Reg string `json:"new-reg"` + Authz string `json:"new-authz"` + Cert string `json:"new-cert"` + Revoke string `json:"revoke-cert"` + Meta struct { + Terms string `json:"terms-of-service"` + Website string `json:"website"` + CAA []string `json:"caa-identities"` + } + } + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return Directory{}, err + } + c.dir = &Directory{ + RegURL: v.Reg, + AuthzURL: v.Authz, + CertURL: v.Cert, + RevokeURL: v.Revoke, + Terms: v.Meta.Terms, + Website: v.Meta.Website, + CAA: v.Meta.CAA, + } + return *c.dir, nil +} + +// CreateCert requests a new certificate using the Certificate Signing Request csr encoded in DER format. +// The exp argument indicates the desired certificate validity duration. CA may issue a certificate +// with a different duration. +// If the bundle argument is true, the returned value will also contain the CA (issuer) certificate chain. +// +// In the case where CA server does not provide the issued certificate in the response, +// CreateCert will poll certURL using c.FetchCert, which will result in additional round-trips. +// In such a scenario, the caller can cancel the polling with ctx. +// +// CreateCert returns an error if the CA's response or chain was unreasonably large. +// Callers are encouraged to parse the returned value to ensure the certificate is valid and has the expected features. +func (c *Client) CreateCert(ctx context.Context, csr []byte, exp time.Duration, bundle bool) (der [][]byte, certURL string, err error) { + if _, err := c.Discover(ctx); err != nil { + return nil, "", err + } + + req := struct { + Resource string `json:"resource"` + CSR string `json:"csr"` + NotBefore string `json:"notBefore,omitempty"` + NotAfter string `json:"notAfter,omitempty"` + }{ + Resource: "new-cert", + CSR: base64.RawURLEncoding.EncodeToString(csr), + } + now := timeNow() + req.NotBefore = now.Format(time.RFC3339) + if exp > 0 { + req.NotAfter = now.Add(exp).Format(time.RFC3339) + } + + res, err := c.post(ctx, c.Key, c.dir.CertURL, req, wantStatus(http.StatusCreated)) + if err != nil { + return nil, "", err + } + defer res.Body.Close() + + curl := res.Header.Get("Location") // cert permanent URL + if res.ContentLength == 0 { + // no cert in the body; poll until we get it + cert, err := c.FetchCert(ctx, curl, bundle) + return cert, curl, err + } + // slurp issued cert and CA chain, if requested + cert, err := c.responseCert(ctx, res, bundle) + return cert, curl, err +} + +// FetchCert retrieves already issued certificate from the given url, in DER format. +// It retries the request until the certificate is successfully retrieved, +// context is cancelled by the caller or an error response is received. +// +// The returned value will also contain the CA (issuer) certificate if the bundle argument is true. +// +// FetchCert returns an error if the CA's response or chain was unreasonably large. +// Callers are encouraged to parse the returned value to ensure the certificate is valid +// and has expected features. 
+func (c *Client) FetchCert(ctx context.Context, url string, bundle bool) ([][]byte, error) { + res, err := c.get(ctx, url, wantStatus(http.StatusOK)) + if err != nil { + return nil, err + } + return c.responseCert(ctx, res, bundle) +} + +// RevokeCert revokes a previously issued certificate cert, provided in DER format. +// +// The key argument, used to sign the request, must be authorized +// to revoke the certificate. It's up to the CA to decide which keys are authorized. +// For instance, the key pair of the certificate may be authorized. +// If the key is nil, c.Key is used instead. +func (c *Client) RevokeCert(ctx context.Context, key crypto.Signer, cert []byte, reason CRLReasonCode) error { + if _, err := c.Discover(ctx); err != nil { + return err + } + + body := &struct { + Resource string `json:"resource"` + Cert string `json:"certificate"` + Reason int `json:"reason"` + }{ + Resource: "revoke-cert", + Cert: base64.RawURLEncoding.EncodeToString(cert), + Reason: int(reason), + } + if key == nil { + key = c.Key + } + res, err := c.post(ctx, key, c.dir.RevokeURL, body, wantStatus(http.StatusOK)) + if err != nil { + return err + } + defer res.Body.Close() + return nil +} + +// AcceptTOS always returns true to indicate the acceptance of a CA's Terms of Service +// during account registration. See Register method of Client for more details. +func AcceptTOS(tosURL string) bool { return true } + +// Register creates a new account registration by following the "new-reg" flow. +// It returns the registered account. The account is not modified. +// +// The registration may require the caller to agree to the CA's Terms of Service (TOS). +// If so, and the account has not indicated the acceptance of the terms (see Account for details), +// Register calls prompt with a TOS URL provided by the CA. Prompt should report +// whether the caller agrees to the terms. To always accept the terms, the caller can use AcceptTOS. +func (c *Client) Register(ctx context.Context, a *Account, prompt func(tosURL string) bool) (*Account, error) { + if _, err := c.Discover(ctx); err != nil { + return nil, err + } + + var err error + if a, err = c.doReg(ctx, c.dir.RegURL, "new-reg", a); err != nil { + return nil, err + } + var accept bool + if a.CurrentTerms != "" && a.CurrentTerms != a.AgreedTerms { + accept = prompt(a.CurrentTerms) + } + if accept { + a.AgreedTerms = a.CurrentTerms + a, err = c.UpdateReg(ctx, a) + } + return a, err +} + +// GetReg retrieves an existing registration. +// The url argument is an Account URI. +func (c *Client) GetReg(ctx context.Context, url string) (*Account, error) { + a, err := c.doReg(ctx, url, "reg", nil) + if err != nil { + return nil, err + } + a.URI = url + return a, nil +} + +// UpdateReg updates an existing registration. +// It returns an updated account copy. The provided account is not modified. +func (c *Client) UpdateReg(ctx context.Context, a *Account) (*Account, error) { + uri := a.URI + a, err := c.doReg(ctx, uri, "reg", a) + if err != nil { + return nil, err + } + a.URI = uri + return a, nil +} + +// Authorize performs the initial step in an authorization flow. +// The caller will then need to choose from and perform a set of returned +// challenges using c.Accept in order to successfully complete authorization. +// +// If an authorization has been previously granted, the CA may return +// a valid authorization (Authorization.Status is StatusValid). If so, the caller +// need not fulfill any challenge and can proceed to requesting a certificate. 
+func (c *Client) Authorize(ctx context.Context, domain string) (*Authorization, error) { + if _, err := c.Discover(ctx); err != nil { + return nil, err + } + + type authzID struct { + Type string `json:"type"` + Value string `json:"value"` + } + req := struct { + Resource string `json:"resource"` + Identifier authzID `json:"identifier"` + }{ + Resource: "new-authz", + Identifier: authzID{Type: "dns", Value: domain}, + } + res, err := c.post(ctx, c.Key, c.dir.AuthzURL, req, wantStatus(http.StatusCreated)) + if err != nil { + return nil, err + } + defer res.Body.Close() + + var v wireAuthz + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return nil, fmt.Errorf("acme: invalid response: %v", err) + } + if v.Status != StatusPending && v.Status != StatusValid { + return nil, fmt.Errorf("acme: unexpected status: %s", v.Status) + } + return v.authorization(res.Header.Get("Location")), nil +} + +// GetAuthorization retrieves an authorization identified by the given URL. +// +// If a caller needs to poll an authorization until its status is final, +// see the WaitAuthorization method. +func (c *Client) GetAuthorization(ctx context.Context, url string) (*Authorization, error) { + res, err := c.get(ctx, url, wantStatus(http.StatusOK, http.StatusAccepted)) + if err != nil { + return nil, err + } + defer res.Body.Close() + var v wireAuthz + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return nil, fmt.Errorf("acme: invalid response: %v", err) + } + return v.authorization(url), nil +} + +// RevokeAuthorization relinquishes an existing authorization identified +// by the given URL. +// The url argument is an Authorization.URI value. +// +// If successful, the caller will be required to obtain a new authorization +// using the Authorize method before being able to request a new certificate +// for the domain associated with the authorization. +// +// It does not revoke existing certificates. +func (c *Client) RevokeAuthorization(ctx context.Context, url string) error { + req := struct { + Resource string `json:"resource"` + Status string `json:"status"` + Delete bool `json:"delete"` + }{ + Resource: "authz", + Status: "deactivated", + Delete: true, + } + res, err := c.post(ctx, c.Key, url, req, wantStatus(http.StatusOK)) + if err != nil { + return err + } + defer res.Body.Close() + return nil +} + +// WaitAuthorization polls an authorization at the given URL +// until it is in one of the final states, StatusValid or StatusInvalid, +// the ACME CA responded with a 4xx error code, or the context is done. +// +// It returns a non-nil Authorization only if its Status is StatusValid. +// In all other cases WaitAuthorization returns an error. +// If the Status is StatusInvalid, the returned error is of type *AuthorizationError. +func (c *Client) WaitAuthorization(ctx context.Context, url string) (*Authorization, error) { + for { + res, err := c.get(ctx, url, wantStatus(http.StatusOK, http.StatusAccepted)) + if err != nil { + return nil, err + } + + var raw wireAuthz + err = json.NewDecoder(res.Body).Decode(&raw) + res.Body.Close() + switch { + case err != nil: + // Skip and retry. + case raw.Status == StatusValid: + return raw.authorization(url), nil + case raw.Status == StatusInvalid: + return nil, raw.error(url) + } + + // Exponential backoff is implemented in c.get above. + // This is just to prevent continuously hitting the CA + // while waiting for a final authorization status. 
+ d := retryAfter(res.Header.Get("Retry-After")) + if d == 0 { + // Given that the fastest challenges TLS-SNI and HTTP-01 + // require a CA to make at least 1 network round trip + // and most likely persist a challenge state, + // this default delay seems reasonable. + d = time.Second + } + t := time.NewTimer(d) + select { + case <-ctx.Done(): + t.Stop() + return nil, ctx.Err() + case <-t.C: + // Retry. + } + } +} + +// GetChallenge retrieves the current status of an challenge. +// +// A client typically polls a challenge status using this method. +func (c *Client) GetChallenge(ctx context.Context, url string) (*Challenge, error) { + res, err := c.get(ctx, url, wantStatus(http.StatusOK, http.StatusAccepted)) + if err != nil { + return nil, err + } + defer res.Body.Close() + v := wireChallenge{URI: url} + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return nil, fmt.Errorf("acme: invalid response: %v", err) + } + return v.challenge(), nil +} + +// Accept informs the server that the client accepts one of its challenges +// previously obtained with c.Authorize. +// +// The server will then perform the validation asynchronously. +func (c *Client) Accept(ctx context.Context, chal *Challenge) (*Challenge, error) { + auth, err := keyAuth(c.Key.Public(), chal.Token) + if err != nil { + return nil, err + } + + req := struct { + Resource string `json:"resource"` + Type string `json:"type"` + Auth string `json:"keyAuthorization"` + }{ + Resource: "challenge", + Type: chal.Type, + Auth: auth, + } + res, err := c.post(ctx, c.Key, chal.URI, req, wantStatus( + http.StatusOK, // according to the spec + http.StatusAccepted, // Let's Encrypt: see https://goo.gl/WsJ7VT (acme-divergences.md) + )) + if err != nil { + return nil, err + } + defer res.Body.Close() + + var v wireChallenge + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return nil, fmt.Errorf("acme: invalid response: %v", err) + } + return v.challenge(), nil +} + +// DNS01ChallengeRecord returns a DNS record value for a dns-01 challenge response. +// A TXT record containing the returned value must be provisioned under +// "_acme-challenge" name of the domain being validated. +// +// The token argument is a Challenge.Token value. +func (c *Client) DNS01ChallengeRecord(token string) (string, error) { + ka, err := keyAuth(c.Key.Public(), token) + if err != nil { + return "", err + } + b := sha256.Sum256([]byte(ka)) + return base64.RawURLEncoding.EncodeToString(b[:]), nil +} + +// HTTP01ChallengeResponse returns the response for an http-01 challenge. +// Servers should respond with the value to HTTP requests at the URL path +// provided by HTTP01ChallengePath to validate the challenge and prove control +// over a domain name. +// +// The token argument is a Challenge.Token value. +func (c *Client) HTTP01ChallengeResponse(token string) (string, error) { + return keyAuth(c.Key.Public(), token) +} + +// HTTP01ChallengePath returns the URL path at which the response for an http-01 challenge +// should be provided by the servers. +// The response value can be obtained with HTTP01ChallengeResponse. +// +// The token argument is a Challenge.Token value. +func (c *Client) HTTP01ChallengePath(token string) string { + return "/.well-known/acme-challenge/" + token +} + +// TLSSNI01ChallengeCert creates a certificate for TLS-SNI-01 challenge response. +// Servers can present the certificate to validate the challenge and prove control +// over a domain name. 
+// +// The implementation is incomplete in that the returned value is a single certificate, +// computed only for Z0 of the key authorization. ACME CAs are expected to update +// their implementations to use the newer version, TLS-SNI-02. +// For more details on TLS-SNI-01 see https://tools.ietf.org/html/draft-ietf-acme-acme-01#section-7.3. +// +// The token argument is a Challenge.Token value. +// If a WithKey option is provided, its private part signs the returned cert, +// and the public part is used to specify the signee. +// If no WithKey option is provided, a new ECDSA key is generated using P-256 curve. +// +// The returned certificate is valid for the next 24 hours and must be presented only when +// the server name of the TLS ClientHello matches exactly the returned name value. +func (c *Client) TLSSNI01ChallengeCert(token string, opt ...CertOption) (cert tls.Certificate, name string, err error) { + ka, err := keyAuth(c.Key.Public(), token) + if err != nil { + return tls.Certificate{}, "", err + } + b := sha256.Sum256([]byte(ka)) + h := hex.EncodeToString(b[:]) + name = fmt.Sprintf("%s.%s.acme.invalid", h[:32], h[32:]) + cert, err = tlsChallengeCert([]string{name}, opt) + if err != nil { + return tls.Certificate{}, "", err + } + return cert, name, nil +} + +// TLSSNI02ChallengeCert creates a certificate for TLS-SNI-02 challenge response. +// Servers can present the certificate to validate the challenge and prove control +// over a domain name. For more details on TLS-SNI-02 see +// https://tools.ietf.org/html/draft-ietf-acme-acme-03#section-7.3. +// +// The token argument is a Challenge.Token value. +// If a WithKey option is provided, its private part signs the returned cert, +// and the public part is used to specify the signee. +// If no WithKey option is provided, a new ECDSA key is generated using P-256 curve. +// +// The returned certificate is valid for the next 24 hours and must be presented only when +// the server name in the TLS ClientHello matches exactly the returned name value. +func (c *Client) TLSSNI02ChallengeCert(token string, opt ...CertOption) (cert tls.Certificate, name string, err error) { + b := sha256.Sum256([]byte(token)) + h := hex.EncodeToString(b[:]) + sanA := fmt.Sprintf("%s.%s.token.acme.invalid", h[:32], h[32:]) + + ka, err := keyAuth(c.Key.Public(), token) + if err != nil { + return tls.Certificate{}, "", err + } + b = sha256.Sum256([]byte(ka)) + h = hex.EncodeToString(b[:]) + sanB := fmt.Sprintf("%s.%s.ka.acme.invalid", h[:32], h[32:]) + + cert, err = tlsChallengeCert([]string{sanA, sanB}, opt) + if err != nil { + return tls.Certificate{}, "", err + } + return cert, sanA, nil +} + +// TLSALPN01ChallengeCert creates a certificate for TLS-ALPN-01 challenge response. +// Servers can present the certificate to validate the challenge and prove control +// over a domain name. For more details on TLS-ALPN-01 see +// https://tools.ietf.org/html/draft-shoemaker-acme-tls-alpn-00#section-3 +// +// The token argument is a Challenge.Token value. +// If a WithKey option is provided, its private part signs the returned cert, +// and the public part is used to specify the signee. +// If no WithKey option is provided, a new ECDSA key is generated using P-256 curve. +// +// The returned certificate is valid for the next 24 hours and must be presented only when +// the server name in the TLS ClientHello matches the domain, and the special acme-tls/1 ALPN protocol +// has been specified. 
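+// An illustrative flow (a sketch only; error handling is elided, and the
+// ctx and chal identifiers are assumptions of the example):
+//
+//	cert, err := c.TLSALPN01ChallengeCert(chal.Token, "example.org")
+//	// Present cert from the server's tls.Config.GetCertificate callback
+//	// whenever the ClientHello offers the ALPNProto ("acme-tls/1") protocol
+//	// with an SNI of "example.org", then call c.Accept(ctx, chal) so the CA
+//	// can start validation.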
+func (c *Client) TLSALPN01ChallengeCert(token, domain string, opt ...CertOption) (cert tls.Certificate, err error) { + ka, err := keyAuth(c.Key.Public(), token) + if err != nil { + return tls.Certificate{}, err + } + shasum := sha256.Sum256([]byte(ka)) + extValue, err := asn1.Marshal(shasum[:]) + if err != nil { + return tls.Certificate{}, err + } + acmeExtension := pkix.Extension{ + Id: idPeACMEIdentifierV1, + Critical: true, + Value: extValue, + } + + tmpl := defaultTLSChallengeCertTemplate() + + var newOpt []CertOption + for _, o := range opt { + switch o := o.(type) { + case *certOptTemplate: + t := *(*x509.Certificate)(o) // shallow copy is ok + tmpl = &t + default: + newOpt = append(newOpt, o) + } + } + tmpl.ExtraExtensions = append(tmpl.ExtraExtensions, acmeExtension) + newOpt = append(newOpt, WithTemplate(tmpl)) + return tlsChallengeCert([]string{domain}, newOpt) +} + +// doReg sends all types of registration requests. +// The type of request is identified by typ argument, which is a "resource" +// in the ACME spec terms. +// +// A non-nil acct argument indicates whether the intention is to mutate data +// of the Account. Only Contact and Agreement of its fields are used +// in such cases. +func (c *Client) doReg(ctx context.Context, url string, typ string, acct *Account) (*Account, error) { + req := struct { + Resource string `json:"resource"` + Contact []string `json:"contact,omitempty"` + Agreement string `json:"agreement,omitempty"` + }{ + Resource: typ, + } + if acct != nil { + req.Contact = acct.Contact + req.Agreement = acct.AgreedTerms + } + res, err := c.post(ctx, c.Key, url, req, wantStatus( + http.StatusOK, // updates and deletes + http.StatusCreated, // new account creation + http.StatusAccepted, // Let's Encrypt divergent implementation + )) + if err != nil { + return nil, err + } + defer res.Body.Close() + + var v struct { + Contact []string + Agreement string + Authorizations string + Certificates string + } + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return nil, fmt.Errorf("acme: invalid response: %v", err) + } + var tos string + if v := linkHeader(res.Header, "terms-of-service"); len(v) > 0 { + tos = v[0] + } + var authz string + if v := linkHeader(res.Header, "next"); len(v) > 0 { + authz = v[0] + } + return &Account{ + URI: res.Header.Get("Location"), + Contact: v.Contact, + AgreedTerms: v.Agreement, + CurrentTerms: tos, + Authz: authz, + Authorizations: v.Authorizations, + Certificates: v.Certificates, + }, nil +} + +// popNonce returns a nonce value previously stored with c.addNonce +// or fetches a fresh one from the given URL. +func (c *Client) popNonce(ctx context.Context, url string) (string, error) { + c.noncesMu.Lock() + defer c.noncesMu.Unlock() + if len(c.nonces) == 0 { + return c.fetchNonce(ctx, url) + } + var nonce string + for nonce = range c.nonces { + delete(c.nonces, nonce) + break + } + return nonce, nil +} + +// clearNonces clears any stored nonces +func (c *Client) clearNonces() { + c.noncesMu.Lock() + defer c.noncesMu.Unlock() + c.nonces = make(map[string]struct{}) +} + +// addNonce stores a nonce value found in h (if any) for future use. 
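+// Stored nonces are consumed by popNonce when the client signs subsequent
+// requests; at most maxNonces values are kept at any time.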
+func (c *Client) addNonce(h http.Header) { + v := nonceFromHeader(h) + if v == "" { + return + } + c.noncesMu.Lock() + defer c.noncesMu.Unlock() + if len(c.nonces) >= maxNonces { + return + } + if c.nonces == nil { + c.nonces = make(map[string]struct{}) + } + c.nonces[v] = struct{}{} +} + +func (c *Client) fetchNonce(ctx context.Context, url string) (string, error) { + r, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return "", err + } + resp, err := c.doNoRetry(ctx, r) + if err != nil { + return "", err + } + defer resp.Body.Close() + nonce := nonceFromHeader(resp.Header) + if nonce == "" { + if resp.StatusCode > 299 { + return "", responseError(resp) + } + return "", errors.New("acme: nonce not found") + } + return nonce, nil +} + +func nonceFromHeader(h http.Header) string { + return h.Get("Replay-Nonce") +} + +func (c *Client) responseCert(ctx context.Context, res *http.Response, bundle bool) ([][]byte, error) { + b, err := ioutil.ReadAll(io.LimitReader(res.Body, maxCertSize+1)) + if err != nil { + return nil, fmt.Errorf("acme: response stream: %v", err) + } + if len(b) > maxCertSize { + return nil, errors.New("acme: certificate is too big") + } + cert := [][]byte{b} + if !bundle { + return cert, nil + } + + // Append CA chain cert(s). + // At least one is required according to the spec: + // https://tools.ietf.org/html/draft-ietf-acme-acme-03#section-6.3.1 + up := linkHeader(res.Header, "up") + if len(up) == 0 { + return nil, errors.New("acme: rel=up link not found") + } + if len(up) > maxChainLen { + return nil, errors.New("acme: rel=up link is too large") + } + for _, url := range up { + cc, err := c.chainCert(ctx, url, 0) + if err != nil { + return nil, err + } + cert = append(cert, cc...) + } + return cert, nil +} + +// chainCert fetches CA certificate chain recursively by following "up" links. +// Each recursive call increments the depth by 1, resulting in an error +// if the recursion level reaches maxChainLen. +// +// First chainCert call starts with depth of 0. +func (c *Client) chainCert(ctx context.Context, url string, depth int) ([][]byte, error) { + if depth >= maxChainLen { + return nil, errors.New("acme: certificate chain is too deep") + } + + res, err := c.get(ctx, url, wantStatus(http.StatusOK)) + if err != nil { + return nil, err + } + defer res.Body.Close() + b, err := ioutil.ReadAll(io.LimitReader(res.Body, maxCertSize+1)) + if err != nil { + return nil, err + } + if len(b) > maxCertSize { + return nil, errors.New("acme: certificate is too big") + } + chain := [][]byte{b} + + uplink := linkHeader(res.Header, "up") + if len(uplink) > maxChainLen { + return nil, errors.New("acme: certificate chain is too large") + } + for _, up := range uplink { + cc, err := c.chainCert(ctx, up, depth+1) + if err != nil { + return nil, err + } + chain = append(chain, cc...) + } + + return chain, nil +} + +// linkHeader returns URI-Reference values of all Link headers +// with relation-type rel. +// See https://tools.ietf.org/html/rfc5988#section-5 for details. +func linkHeader(h http.Header, rel string) []string { + var links []string + for _, v := range h["Link"] { + parts := strings.Split(v, ";") + for _, p := range parts { + p = strings.TrimSpace(p) + if !strings.HasPrefix(p, "rel=") { + continue + } + if v := strings.Trim(p[4:], `"`); v == rel { + links = append(links, strings.Trim(parts[0], "<>")) + } + } + } + return links +} + +// keyAuth generates a key authorization string for a given token. 
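+// The returned value is the token followed by "." and the base64url-encoded
+// JWK thumbprint (RFC 7638) of pub, as expected in ACME challenge responses.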
+func keyAuth(pub crypto.PublicKey, token string) (string, error) { + th, err := JWKThumbprint(pub) + if err != nil { + return "", err + } + return fmt.Sprintf("%s.%s", token, th), nil +} + +// defaultTLSChallengeCertTemplate is a template used to create challenge certs for TLS challenges. +func defaultTLSChallengeCertTemplate() *x509.Certificate { + return &x509.Certificate{ + SerialNumber: big.NewInt(1), + NotBefore: time.Now(), + NotAfter: time.Now().Add(24 * time.Hour), + BasicConstraintsValid: true, + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + } +} + +// tlsChallengeCert creates a temporary certificate for TLS-SNI challenges +// with the given SANs and auto-generated public/private key pair. +// The Subject Common Name is set to the first SAN to aid debugging. +// To create a cert with a custom key pair, specify WithKey option. +func tlsChallengeCert(san []string, opt []CertOption) (tls.Certificate, error) { + var key crypto.Signer + tmpl := defaultTLSChallengeCertTemplate() + for _, o := range opt { + switch o := o.(type) { + case *certOptKey: + if key != nil { + return tls.Certificate{}, errors.New("acme: duplicate key option") + } + key = o.key + case *certOptTemplate: + t := *(*x509.Certificate)(o) // shallow copy is ok + tmpl = &t + default: + // package's fault, if we let this happen: + panic(fmt.Sprintf("unsupported option type %T", o)) + } + } + if key == nil { + var err error + if key, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader); err != nil { + return tls.Certificate{}, err + } + } + tmpl.DNSNames = san + if len(san) > 0 { + tmpl.Subject.CommonName = san[0] + } + + der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key) + if err != nil { + return tls.Certificate{}, err + } + return tls.Certificate{ + Certificate: [][]byte{der}, + PrivateKey: key, + }, nil +} + +// encodePEM returns b encoded as PEM with block of type typ. +func encodePEM(typ string, b []byte) []byte { + pb := &pem.Block{Type: typ, Bytes: b} + return pem.EncodeToMemory(pb) +} + +// timeNow is useful for testing for fixed current time. +var timeNow = time.Now diff --git a/vendor/golang.org/x/crypto/acme/autocert/autocert.go b/vendor/golang.org/x/crypto/acme/autocert/autocert.go new file mode 100644 index 0000000..4c2fc07 --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/autocert/autocert.go @@ -0,0 +1,1139 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package autocert provides automatic access to certificates from Let's Encrypt +// and any other ACME-based CA. +// +// This package is a work in progress and makes no API stability promises. +package autocert + +import ( + "bytes" + "context" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "errors" + "fmt" + "io" + mathrand "math/rand" + "net" + "net/http" + "path" + "strings" + "sync" + "time" + + "golang.org/x/crypto/acme" +) + +// createCertRetryAfter is how much time to wait before removing a failed state +// entry due to an unsuccessful createCert call. +// This is a variable instead of a const for testing. +// TODO: Consider making it configurable or an exp backoff? +var createCertRetryAfter = time.Minute + +// pseudoRand is safe for concurrent use. 
+var pseudoRand *lockedMathRand + +func init() { + src := mathrand.NewSource(time.Now().UnixNano()) + pseudoRand = &lockedMathRand{rnd: mathrand.New(src)} +} + +// AcceptTOS is a Manager.Prompt function that always returns true to +// indicate acceptance of the CA's Terms of Service during account +// registration. +func AcceptTOS(tosURL string) bool { return true } + +// HostPolicy specifies which host names the Manager is allowed to respond to. +// It returns a non-nil error if the host should be rejected. +// The returned error is accessible via tls.Conn.Handshake and its callers. +// See Manager's HostPolicy field and GetCertificate method docs for more details. +type HostPolicy func(ctx context.Context, host string) error + +// HostWhitelist returns a policy where only the specified host names are allowed. +// Only exact matches are currently supported. Subdomains, regexp or wildcard +// will not match. +func HostWhitelist(hosts ...string) HostPolicy { + whitelist := make(map[string]bool, len(hosts)) + for _, h := range hosts { + whitelist[h] = true + } + return func(_ context.Context, host string) error { + if !whitelist[host] { + return errors.New("acme/autocert: host not configured") + } + return nil + } +} + +// defaultHostPolicy is used when Manager.HostPolicy is not set. +func defaultHostPolicy(context.Context, string) error { + return nil +} + +// Manager is a stateful certificate manager built on top of acme.Client. +// It obtains and refreshes certificates automatically using "tls-alpn-01", +// "tls-sni-01", "tls-sni-02" and "http-01" challenge types, +// as well as providing them to a TLS server via tls.Config. +// +// You must specify a cache implementation, such as DirCache, +// to reuse obtained certificates across program restarts. +// Otherwise your server is very likely to exceed the certificate +// issuer's request rate limits. +type Manager struct { + // Prompt specifies a callback function to conditionally accept a CA's Terms of Service (TOS). + // The registration may require the caller to agree to the CA's TOS. + // If so, Manager calls Prompt with a TOS URL provided by the CA. Prompt should report + // whether the caller agrees to the terms. + // + // To always accept the terms, the callers can use AcceptTOS. + Prompt func(tosURL string) bool + + // Cache optionally stores and retrieves previously-obtained certificates + // and other state. If nil, certs will only be cached for the lifetime of + // the Manager. Multiple Managers can share the same Cache. + // + // Using a persistent Cache, such as DirCache, is strongly recommended. + Cache Cache + + // HostPolicy controls which domains the Manager will attempt + // to retrieve new certificates for. It does not affect cached certs. + // + // If non-nil, HostPolicy is called before requesting a new cert. + // If nil, all hosts are currently allowed. This is not recommended, + // as it opens a potential attack where clients connect to a server + // by IP address and pretend to be asking for an incorrect host name. + // Manager will attempt to obtain a certificate for that host, incorrectly, + // eventually reaching the CA's rate limit for certificate requests + // and making it impossible to obtain actual certificates. + // + // See GetCertificate for more details. + HostPolicy HostPolicy + + // RenewBefore optionally specifies how early certificates should + // be renewed before they expire. + // + // If zero, they're renewed 30 days before expiration. 
+ RenewBefore time.Duration + + // Client is used to perform low-level operations, such as account registration + // and requesting new certificates. + // + // If Client is nil, a zero-value acme.Client is used with acme.LetsEncryptURL + // as directory endpoint. If the Client.Key is nil, a new ECDSA P-256 key is + // generated and, if Cache is not nil, stored in cache. + // + // Mutating the field after the first call of GetCertificate method will have no effect. + Client *acme.Client + + // Email optionally specifies a contact email address. + // This is used by CAs, such as Let's Encrypt, to notify about problems + // with issued certificates. + // + // If the Client's account key is already registered, Email is not used. + Email string + + // ForceRSA used to make the Manager generate RSA certificates. It is now ignored. + // + // Deprecated: the Manager will request the correct type of certificate based + // on what each client supports. + ForceRSA bool + + // ExtraExtensions are used when generating a new CSR (Certificate Request), + // thus allowing customization of the resulting certificate. + // For instance, TLS Feature Extension (RFC 7633) can be used + // to prevent an OCSP downgrade attack. + // + // The field value is passed to crypto/x509.CreateCertificateRequest + // in the template's ExtraExtensions field as is. + ExtraExtensions []pkix.Extension + + clientMu sync.Mutex + client *acme.Client // initialized by acmeClient method + + stateMu sync.Mutex + state map[certKey]*certState + + // renewal tracks the set of domains currently running renewal timers. + renewalMu sync.Mutex + renewal map[certKey]*domainRenewal + + // tokensMu guards the rest of the fields: tryHTTP01, certTokens and httpTokens. + tokensMu sync.RWMutex + // tryHTTP01 indicates whether the Manager should try "http-01" challenge type + // during the authorization flow. + tryHTTP01 bool + // httpTokens contains response body values for http-01 challenges + // and is keyed by the URL path at which a challenge response is expected + // to be provisioned. + // The entries are stored for the duration of the authorization flow. + httpTokens map[string][]byte + // certTokens contains temporary certificates for tls-sni and tls-alpn challenges + // and is keyed by token domain name, which matches server name of ClientHello. + // Keys always have ".acme.invalid" suffix for tls-sni. Otherwise, they are domain names + // for tls-alpn. + // The entries are stored for the duration of the authorization flow. + certTokens map[string]*tls.Certificate + // nowFunc, if not nil, returns the current time. This may be set for + // testing purposes. + nowFunc func() time.Time +} + +// certKey is the key by which certificates are tracked in state, renewal and cache. +type certKey struct { + domain string // without trailing dot + isRSA bool // RSA cert for legacy clients (as opposed to default ECDSA) + isToken bool // tls-based challenge token cert; key type is undefined regardless of isRSA +} + +func (c certKey) String() string { + if c.isToken { + return c.domain + "+token" + } + if c.isRSA { + return c.domain + "+rsa" + } + return c.domain +} + +// TLSConfig creates a new TLS config suitable for net/http.Server servers, +// supporting HTTP/2 and the tls-alpn-01 ACME challenge type. 
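+// A minimal server setup might look like the following sketch, where m is a
+// configured *Manager and handler is an assumed http.Handler:
+//
+//	srv := &http.Server{Addr: ":443", TLSConfig: m.TLSConfig(), Handler: handler}
+//	log.Fatal(srv.ListenAndServeTLS("", ""))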
+func (m *Manager) TLSConfig() *tls.Config { + return &tls.Config{ + GetCertificate: m.GetCertificate, + NextProtos: []string{ + "h2", "http/1.1", // enable HTTP/2 + acme.ALPNProto, // enable tls-alpn ACME challenges + }, + } +} + +// GetCertificate implements the tls.Config.GetCertificate hook. +// It provides a TLS certificate for hello.ServerName host, including answering +// tls-alpn-01 and *.acme.invalid (tls-sni-01 and tls-sni-02) challenges. +// All other fields of hello are ignored. +// +// If m.HostPolicy is non-nil, GetCertificate calls the policy before requesting +// a new cert. A non-nil error returned from m.HostPolicy halts TLS negotiation. +// The error is propagated back to the caller of GetCertificate and is user-visible. +// This does not affect cached certs. See HostPolicy field description for more details. +// +// If GetCertificate is used directly, instead of via Manager.TLSConfig, package users will +// also have to add acme.ALPNProto to NextProtos for tls-alpn-01, or use HTTPHandler +// for http-01. (The tls-sni-* challenges have been deprecated by popular ACME providers +// due to security issues in the ecosystem.) +func (m *Manager) GetCertificate(hello *tls.ClientHelloInfo) (*tls.Certificate, error) { + if m.Prompt == nil { + return nil, errors.New("acme/autocert: Manager.Prompt not set") + } + + name := hello.ServerName + if name == "" { + return nil, errors.New("acme/autocert: missing server name") + } + if !strings.Contains(strings.Trim(name, "."), ".") { + return nil, errors.New("acme/autocert: server name component count invalid") + } + if strings.ContainsAny(name, `+/\`) { + return nil, errors.New("acme/autocert: server name contains invalid character") + } + + // In the worst-case scenario, the timeout needs to account for caching, host policy, + // domain ownership verification and certificate issuance. + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + // Check whether this is a token cert requested for TLS-SNI or TLS-ALPN challenge. + if wantsTokenCert(hello) { + m.tokensMu.RLock() + defer m.tokensMu.RUnlock() + // It's ok to use the same token cert key for both tls-sni and tls-alpn + // because there's always at most 1 token cert per on-going domain authorization. + // See m.verify for details. + if cert := m.certTokens[name]; cert != nil { + return cert, nil + } + if cert, err := m.cacheGet(ctx, certKey{domain: name, isToken: true}); err == nil { + return cert, nil + } + // TODO: cache error results? + return nil, fmt.Errorf("acme/autocert: no token cert for %q", name) + } + + // regular domain + ck := certKey{ + domain: strings.TrimSuffix(name, "."), // golang.org/issue/18114 + isRSA: !supportsECDSA(hello), + } + cert, err := m.cert(ctx, ck) + if err == nil { + return cert, nil + } + if err != ErrCacheMiss { + return nil, err + } + + // first-time + if err := m.hostPolicy()(ctx, name); err != nil { + return nil, err + } + cert, err = m.createCert(ctx, ck) + if err != nil { + return nil, err + } + m.cachePut(ctx, ck, cert) + return cert, nil +} + +// wantsTokenCert reports whether a TLS request with SNI is made by a CA server +// for a challenge verification. 
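+// For tls-alpn-01 the CA offers exactly one ALPN protocol, acme.ALPNProto;
+// for the deprecated tls-sni challenges it connects with an SNI name ending
+// in ".acme.invalid".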
+func wantsTokenCert(hello *tls.ClientHelloInfo) bool { + // tls-alpn-01 + if len(hello.SupportedProtos) == 1 && hello.SupportedProtos[0] == acme.ALPNProto { + return true + } + // tls-sni-xx + return strings.HasSuffix(hello.ServerName, ".acme.invalid") +} + +func supportsECDSA(hello *tls.ClientHelloInfo) bool { + // The "signature_algorithms" extension, if present, limits the key exchange + // algorithms allowed by the cipher suites. See RFC 5246, section 7.4.1.4.1. + if hello.SignatureSchemes != nil { + ecdsaOK := false + schemeLoop: + for _, scheme := range hello.SignatureSchemes { + const tlsECDSAWithSHA1 tls.SignatureScheme = 0x0203 // constant added in Go 1.10 + switch scheme { + case tlsECDSAWithSHA1, tls.ECDSAWithP256AndSHA256, + tls.ECDSAWithP384AndSHA384, tls.ECDSAWithP521AndSHA512: + ecdsaOK = true + break schemeLoop + } + } + if !ecdsaOK { + return false + } + } + if hello.SupportedCurves != nil { + ecdsaOK := false + for _, curve := range hello.SupportedCurves { + if curve == tls.CurveP256 { + ecdsaOK = true + break + } + } + if !ecdsaOK { + return false + } + } + for _, suite := range hello.CipherSuites { + switch suite { + case tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: + return true + } + } + return false +} + +// HTTPHandler configures the Manager to provision ACME "http-01" challenge responses. +// It returns an http.Handler that responds to the challenges and must be +// running on port 80. If it receives a request that is not an ACME challenge, +// it delegates the request to the optional fallback handler. +// +// If fallback is nil, the returned handler redirects all GET and HEAD requests +// to the default TLS port 443 with 302 Found status code, preserving the original +// request path and query. It responds with 400 Bad Request to all other HTTP methods. +// The fallback is not protected by the optional HostPolicy. +// +// Because the fallback handler is run with unencrypted port 80 requests, +// the fallback should not serve TLS-only requests. +// +// If HTTPHandler is never called, the Manager will only use the "tls-alpn-01" +// challenge for domain verification. +func (m *Manager) HTTPHandler(fallback http.Handler) http.Handler { + m.tokensMu.Lock() + defer m.tokensMu.Unlock() + m.tryHTTP01 = true + + if fallback == nil { + fallback = http.HandlerFunc(handleHTTPRedirect) + } + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !strings.HasPrefix(r.URL.Path, "/.well-known/acme-challenge/") { + fallback.ServeHTTP(w, r) + return + } + // A reasonable context timeout for cache and host policy only, + // because we don't wait for a new certificate issuance here. 
+ ctx, cancel := context.WithTimeout(r.Context(), time.Minute) + defer cancel() + if err := m.hostPolicy()(ctx, r.Host); err != nil { + http.Error(w, err.Error(), http.StatusForbidden) + return + } + data, err := m.httpToken(ctx, r.URL.Path) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + w.Write(data) + }) +} + +func handleHTTPRedirect(w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" && r.Method != "HEAD" { + http.Error(w, "Use HTTPS", http.StatusBadRequest) + return + } + target := "https://" + stripPort(r.Host) + r.URL.RequestURI() + http.Redirect(w, r, target, http.StatusFound) +} + +func stripPort(hostport string) string { + host, _, err := net.SplitHostPort(hostport) + if err != nil { + return hostport + } + return net.JoinHostPort(host, "443") +} + +// cert returns an existing certificate either from m.state or cache. +// If a certificate is found in cache but not in m.state, the latter will be filled +// with the cached value. +func (m *Manager) cert(ctx context.Context, ck certKey) (*tls.Certificate, error) { + m.stateMu.Lock() + if s, ok := m.state[ck]; ok { + m.stateMu.Unlock() + s.RLock() + defer s.RUnlock() + return s.tlscert() + } + defer m.stateMu.Unlock() + cert, err := m.cacheGet(ctx, ck) + if err != nil { + return nil, err + } + signer, ok := cert.PrivateKey.(crypto.Signer) + if !ok { + return nil, errors.New("acme/autocert: private key cannot sign") + } + if m.state == nil { + m.state = make(map[certKey]*certState) + } + s := &certState{ + key: signer, + cert: cert.Certificate, + leaf: cert.Leaf, + } + m.state[ck] = s + go m.renew(ck, s.key, s.leaf.NotAfter) + return cert, nil +} + +// cacheGet always returns a valid certificate, or an error otherwise. +// If a cached certificate exists but is not valid, ErrCacheMiss is returned. +func (m *Manager) cacheGet(ctx context.Context, ck certKey) (*tls.Certificate, error) { + if m.Cache == nil { + return nil, ErrCacheMiss + } + data, err := m.Cache.Get(ctx, ck.String()) + if err != nil { + return nil, err + } + + // private + priv, pub := pem.Decode(data) + if priv == nil || !strings.Contains(priv.Type, "PRIVATE") { + return nil, ErrCacheMiss + } + privKey, err := parsePrivateKey(priv.Bytes) + if err != nil { + return nil, err + } + + // public + var pubDER [][]byte + for len(pub) > 0 { + var b *pem.Block + b, pub = pem.Decode(pub) + if b == nil { + break + } + pubDER = append(pubDER, b.Bytes) + } + if len(pub) > 0 { + // Leftover content not consumed by pem.Decode. Corrupt. Ignore. 
+ return nil, ErrCacheMiss + } + + // verify and create TLS cert + leaf, err := validCert(ck, pubDER, privKey, m.now()) + if err != nil { + return nil, ErrCacheMiss + } + tlscert := &tls.Certificate{ + Certificate: pubDER, + PrivateKey: privKey, + Leaf: leaf, + } + return tlscert, nil +} + +func (m *Manager) cachePut(ctx context.Context, ck certKey, tlscert *tls.Certificate) error { + if m.Cache == nil { + return nil + } + + // contains PEM-encoded data + var buf bytes.Buffer + + // private + switch key := tlscert.PrivateKey.(type) { + case *ecdsa.PrivateKey: + if err := encodeECDSAKey(&buf, key); err != nil { + return err + } + case *rsa.PrivateKey: + b := x509.MarshalPKCS1PrivateKey(key) + pb := &pem.Block{Type: "RSA PRIVATE KEY", Bytes: b} + if err := pem.Encode(&buf, pb); err != nil { + return err + } + default: + return errors.New("acme/autocert: unknown private key type") + } + + // public + for _, b := range tlscert.Certificate { + pb := &pem.Block{Type: "CERTIFICATE", Bytes: b} + if err := pem.Encode(&buf, pb); err != nil { + return err + } + } + + return m.Cache.Put(ctx, ck.String(), buf.Bytes()) +} + +func encodeECDSAKey(w io.Writer, key *ecdsa.PrivateKey) error { + b, err := x509.MarshalECPrivateKey(key) + if err != nil { + return err + } + pb := &pem.Block{Type: "EC PRIVATE KEY", Bytes: b} + return pem.Encode(w, pb) +} + +// createCert starts the domain ownership verification and returns a certificate +// for that domain upon success. +// +// If the domain is already being verified, it waits for the existing verification to complete. +// Either way, createCert blocks for the duration of the whole process. +func (m *Manager) createCert(ctx context.Context, ck certKey) (*tls.Certificate, error) { + // TODO: maybe rewrite this whole piece using sync.Once + state, err := m.certState(ck) + if err != nil { + return nil, err + } + // state may exist if another goroutine is already working on it + // in which case just wait for it to finish + if !state.locked { + state.RLock() + defer state.RUnlock() + return state.tlscert() + } + + // We are the first; state is locked. + // Unblock the readers when domain ownership is verified + // and we got the cert or the process failed. + defer state.Unlock() + state.locked = false + + der, leaf, err := m.authorizedCert(ctx, state.key, ck) + if err != nil { + // Remove the failed state after some time, + // making the manager call createCert again on the following TLS hello. + time.AfterFunc(createCertRetryAfter, func() { + defer testDidRemoveState(ck) + m.stateMu.Lock() + defer m.stateMu.Unlock() + // Verify the state hasn't changed and it's still invalid + // before deleting. + s, ok := m.state[ck] + if !ok { + return + } + if _, err := validCert(ck, s.cert, s.key, m.now()); err == nil { + return + } + delete(m.state, ck) + }) + return nil, err + } + state.cert = der + state.leaf = leaf + go m.renew(ck, state.key, state.leaf.NotAfter) + return state.tlscert() +} + +// certState returns a new or existing certState. +// If a new certState is returned, state.exist is false and the state is locked. +// The returned error is non-nil only in the case where a new state could not be created. 
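+// A caller that is handed a newly created state receives it write-locked and
+// must Unlock it once the certificate data has been filled in or the attempt
+// has failed; see createCert above.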
+func (m *Manager) certState(ck certKey) (*certState, error) { + m.stateMu.Lock() + defer m.stateMu.Unlock() + if m.state == nil { + m.state = make(map[certKey]*certState) + } + // existing state + if state, ok := m.state[ck]; ok { + return state, nil + } + + // new locked state + var ( + err error + key crypto.Signer + ) + if ck.isRSA { + key, err = rsa.GenerateKey(rand.Reader, 2048) + } else { + key, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + } + if err != nil { + return nil, err + } + + state := &certState{ + key: key, + locked: true, + } + state.Lock() // will be unlocked by m.certState caller + m.state[ck] = state + return state, nil +} + +// authorizedCert starts the domain ownership verification process and requests a new cert upon success. +// The key argument is the certificate private key. +func (m *Manager) authorizedCert(ctx context.Context, key crypto.Signer, ck certKey) (der [][]byte, leaf *x509.Certificate, err error) { + client, err := m.acmeClient(ctx) + if err != nil { + return nil, nil, err + } + + if err := m.verify(ctx, client, ck.domain); err != nil { + return nil, nil, err + } + csr, err := certRequest(key, ck.domain, m.ExtraExtensions) + if err != nil { + return nil, nil, err + } + der, _, err = client.CreateCert(ctx, csr, 0, true) + if err != nil { + return nil, nil, err + } + leaf, err = validCert(ck, der, key, m.now()) + if err != nil { + return nil, nil, err + } + return der, leaf, nil +} + +// revokePendingAuthz revokes all authorizations idenfied by the elements of uri slice. +// It ignores revocation errors. +func (m *Manager) revokePendingAuthz(ctx context.Context, uri []string) { + client, err := m.acmeClient(ctx) + if err != nil { + return + } + for _, u := range uri { + client.RevokeAuthorization(ctx, u) + } +} + +// verify runs the identifier (domain) authorization flow +// using each applicable ACME challenge type. +func (m *Manager) verify(ctx context.Context, client *acme.Client, domain string) error { + // The list of challenge types we'll try to fulfill + // in this specific order. + challengeTypes := []string{"tls-alpn-01", "tls-sni-02", "tls-sni-01"} + m.tokensMu.RLock() + if m.tryHTTP01 { + challengeTypes = append(challengeTypes, "http-01") + } + m.tokensMu.RUnlock() + + // Keep track of pending authzs and revoke the ones that did not validate. + pendingAuthzs := make(map[string]bool) + defer func() { + var uri []string + for k, pending := range pendingAuthzs { + if pending { + uri = append(uri, k) + } + } + if len(uri) > 0 { + // Use "detached" background context. + // The revocations need not happen in the current verification flow. + go m.revokePendingAuthz(context.Background(), uri) + } + }() + + // errs accumulates challenge failure errors, printed if all fail + errs := make(map[*acme.Challenge]error) + var nextTyp int // challengeType index of the next challenge type to try + for { + // Start domain authorization and get the challenge. + authz, err := client.Authorize(ctx, domain) + if err != nil { + return err + } + // No point in accepting challenges if the authorization status + // is in a final state. + switch authz.Status { + case acme.StatusValid: + return nil // already authorized + case acme.StatusInvalid: + return fmt.Errorf("acme/autocert: invalid authorization %q", authz.URI) + } + + pendingAuthzs[authz.URI] = true + + // Pick the next preferred challenge. 
+ var chal *acme.Challenge + for chal == nil && nextTyp < len(challengeTypes) { + chal = pickChallenge(challengeTypes[nextTyp], authz.Challenges) + nextTyp++ + } + if chal == nil { + errorMsg := fmt.Sprintf("acme/autocert: unable to authorize %q", domain) + for chal, err := range errs { + errorMsg += fmt.Sprintf("; challenge %q failed with error: %v", chal.Type, err) + } + return errors.New(errorMsg) + } + cleanup, err := m.fulfill(ctx, client, chal, domain) + if err != nil { + errs[chal] = err + continue + } + defer cleanup() + if _, err := client.Accept(ctx, chal); err != nil { + errs[chal] = err + continue + } + + // A challenge is fulfilled and accepted: wait for the CA to validate. + if _, err := client.WaitAuthorization(ctx, authz.URI); err != nil { + errs[chal] = err + continue + } + delete(pendingAuthzs, authz.URI) + return nil + } +} + +// fulfill provisions a response to the challenge chal. +// The cleanup is non-nil only if provisioning succeeded. +func (m *Manager) fulfill(ctx context.Context, client *acme.Client, chal *acme.Challenge, domain string) (cleanup func(), err error) { + switch chal.Type { + case "tls-alpn-01": + cert, err := client.TLSALPN01ChallengeCert(chal.Token, domain) + if err != nil { + return nil, err + } + m.putCertToken(ctx, domain, &cert) + return func() { go m.deleteCertToken(domain) }, nil + case "tls-sni-01": + cert, name, err := client.TLSSNI01ChallengeCert(chal.Token) + if err != nil { + return nil, err + } + m.putCertToken(ctx, name, &cert) + return func() { go m.deleteCertToken(name) }, nil + case "tls-sni-02": + cert, name, err := client.TLSSNI02ChallengeCert(chal.Token) + if err != nil { + return nil, err + } + m.putCertToken(ctx, name, &cert) + return func() { go m.deleteCertToken(name) }, nil + case "http-01": + resp, err := client.HTTP01ChallengeResponse(chal.Token) + if err != nil { + return nil, err + } + p := client.HTTP01ChallengePath(chal.Token) + m.putHTTPToken(ctx, p, resp) + return func() { go m.deleteHTTPToken(p) }, nil + } + return nil, fmt.Errorf("acme/autocert: unknown challenge type %q", chal.Type) +} + +func pickChallenge(typ string, chal []*acme.Challenge) *acme.Challenge { + for _, c := range chal { + if c.Type == typ { + return c + } + } + return nil +} + +// putCertToken stores the token certificate with the specified name +// in both m.certTokens map and m.Cache. +func (m *Manager) putCertToken(ctx context.Context, name string, cert *tls.Certificate) { + m.tokensMu.Lock() + defer m.tokensMu.Unlock() + if m.certTokens == nil { + m.certTokens = make(map[string]*tls.Certificate) + } + m.certTokens[name] = cert + m.cachePut(ctx, certKey{domain: name, isToken: true}, cert) +} + +// deleteCertToken removes the token certificate with the specified name +// from both m.certTokens map and m.Cache. +func (m *Manager) deleteCertToken(name string) { + m.tokensMu.Lock() + defer m.tokensMu.Unlock() + delete(m.certTokens, name) + if m.Cache != nil { + ck := certKey{domain: name, isToken: true} + m.Cache.Delete(context.Background(), ck.String()) + } +} + +// httpToken retrieves an existing http-01 token value from an in-memory map +// or the optional cache. 
+func (m *Manager) httpToken(ctx context.Context, tokenPath string) ([]byte, error) { + m.tokensMu.RLock() + defer m.tokensMu.RUnlock() + if v, ok := m.httpTokens[tokenPath]; ok { + return v, nil + } + if m.Cache == nil { + return nil, fmt.Errorf("acme/autocert: no token at %q", tokenPath) + } + return m.Cache.Get(ctx, httpTokenCacheKey(tokenPath)) +} + +// putHTTPToken stores an http-01 token value using tokenPath as key +// in both in-memory map and the optional Cache. +// +// It ignores any error returned from Cache.Put. +func (m *Manager) putHTTPToken(ctx context.Context, tokenPath, val string) { + m.tokensMu.Lock() + defer m.tokensMu.Unlock() + if m.httpTokens == nil { + m.httpTokens = make(map[string][]byte) + } + b := []byte(val) + m.httpTokens[tokenPath] = b + if m.Cache != nil { + m.Cache.Put(ctx, httpTokenCacheKey(tokenPath), b) + } +} + +// deleteHTTPToken removes an http-01 token value from both in-memory map +// and the optional Cache, ignoring any error returned from the latter. +// +// If m.Cache is non-nil, it blocks until Cache.Delete returns without a timeout. +func (m *Manager) deleteHTTPToken(tokenPath string) { + m.tokensMu.Lock() + defer m.tokensMu.Unlock() + delete(m.httpTokens, tokenPath) + if m.Cache != nil { + m.Cache.Delete(context.Background(), httpTokenCacheKey(tokenPath)) + } +} + +// httpTokenCacheKey returns a key at which an http-01 token value may be stored +// in the Manager's optional Cache. +func httpTokenCacheKey(tokenPath string) string { + return path.Base(tokenPath) + "+http-01" +} + +// renew starts a cert renewal timer loop, one per domain. +// +// The loop is scheduled in two cases: +// - a cert was fetched from cache for the first time (wasn't in m.state) +// - a new cert was created by m.createCert +// +// The key argument is a certificate private key. +// The exp argument is the cert expiration time (NotAfter). +func (m *Manager) renew(ck certKey, key crypto.Signer, exp time.Time) { + m.renewalMu.Lock() + defer m.renewalMu.Unlock() + if m.renewal[ck] != nil { + // another goroutine is already on it + return + } + if m.renewal == nil { + m.renewal = make(map[certKey]*domainRenewal) + } + dr := &domainRenewal{m: m, ck: ck, key: key} + m.renewal[ck] = dr + dr.start(exp) +} + +// stopRenew stops all currently running cert renewal timers. +// The timers are not restarted during the lifetime of the Manager. +func (m *Manager) stopRenew() { + m.renewalMu.Lock() + defer m.renewalMu.Unlock() + for name, dr := range m.renewal { + delete(m.renewal, name) + dr.stop() + } +} + +func (m *Manager) accountKey(ctx context.Context) (crypto.Signer, error) { + const keyName = "acme_account+key" + + // Previous versions of autocert stored the value under a different key. 
+ const legacyKeyName = "acme_account.key" + + genKey := func() (*ecdsa.PrivateKey, error) { + return ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + } + + if m.Cache == nil { + return genKey() + } + + data, err := m.Cache.Get(ctx, keyName) + if err == ErrCacheMiss { + data, err = m.Cache.Get(ctx, legacyKeyName) + } + if err == ErrCacheMiss { + key, err := genKey() + if err != nil { + return nil, err + } + var buf bytes.Buffer + if err := encodeECDSAKey(&buf, key); err != nil { + return nil, err + } + if err := m.Cache.Put(ctx, keyName, buf.Bytes()); err != nil { + return nil, err + } + return key, nil + } + if err != nil { + return nil, err + } + + priv, _ := pem.Decode(data) + if priv == nil || !strings.Contains(priv.Type, "PRIVATE") { + return nil, errors.New("acme/autocert: invalid account key found in cache") + } + return parsePrivateKey(priv.Bytes) +} + +func (m *Manager) acmeClient(ctx context.Context) (*acme.Client, error) { + m.clientMu.Lock() + defer m.clientMu.Unlock() + if m.client != nil { + return m.client, nil + } + + client := m.Client + if client == nil { + client = &acme.Client{DirectoryURL: acme.LetsEncryptURL} + } + if client.Key == nil { + var err error + client.Key, err = m.accountKey(ctx) + if err != nil { + return nil, err + } + } + var contact []string + if m.Email != "" { + contact = []string{"mailto:" + m.Email} + } + a := &acme.Account{Contact: contact} + _, err := client.Register(ctx, a, m.Prompt) + if ae, ok := err.(*acme.Error); err == nil || ok && ae.StatusCode == http.StatusConflict { + // conflict indicates the key is already registered + m.client = client + err = nil + } + return m.client, err +} + +func (m *Manager) hostPolicy() HostPolicy { + if m.HostPolicy != nil { + return m.HostPolicy + } + return defaultHostPolicy +} + +func (m *Manager) renewBefore() time.Duration { + if m.RenewBefore > renewJitter { + return m.RenewBefore + } + return 720 * time.Hour // 30 days +} + +func (m *Manager) now() time.Time { + if m.nowFunc != nil { + return m.nowFunc() + } + return time.Now() +} + +// certState is ready when its mutex is unlocked for reading. +type certState struct { + sync.RWMutex + locked bool // locked for read/write + key crypto.Signer // private key for cert + cert [][]byte // DER encoding + leaf *x509.Certificate // parsed cert[0]; always non-nil if cert != nil +} + +// tlscert creates a tls.Certificate from s.key and s.cert. +// Callers should wrap it in s.RLock() and s.RUnlock(). +func (s *certState) tlscert() (*tls.Certificate, error) { + if s.key == nil { + return nil, errors.New("acme/autocert: missing signer") + } + if len(s.cert) == 0 { + return nil, errors.New("acme/autocert: missing certificate") + } + return &tls.Certificate{ + PrivateKey: s.key, + Certificate: s.cert, + Leaf: s.leaf, + }, nil +} + +// certRequest generates a CSR for the given common name cn and optional SANs. +func certRequest(key crypto.Signer, cn string, ext []pkix.Extension, san ...string) ([]byte, error) { + req := &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: cn}, + DNSNames: san, + ExtraExtensions: ext, + } + return x509.CreateCertificateRequest(rand.Reader, req, key) +} + +// Attempt to parse the given private key DER block. OpenSSL 0.9.8 generates +// PKCS#1 private keys by default, while OpenSSL 1.0.0 generates PKCS#8 keys. +// OpenSSL ecparam generates SEC1 EC private keys for ECDSA. We try all three. +// +// Inspired by parsePrivateKey in crypto/tls/tls.go. 
+func parsePrivateKey(der []byte) (crypto.Signer, error) { + if key, err := x509.ParsePKCS1PrivateKey(der); err == nil { + return key, nil + } + if key, err := x509.ParsePKCS8PrivateKey(der); err == nil { + switch key := key.(type) { + case *rsa.PrivateKey: + return key, nil + case *ecdsa.PrivateKey: + return key, nil + default: + return nil, errors.New("acme/autocert: unknown private key type in PKCS#8 wrapping") + } + } + if key, err := x509.ParseECPrivateKey(der); err == nil { + return key, nil + } + + return nil, errors.New("acme/autocert: failed to parse private key") +} + +// validCert parses a cert chain provided as der argument and verifies the leaf and der[0] +// correspond to the private key, the domain and key type match, and expiration dates +// are valid. It doesn't do any revocation checking. +// +// The returned value is the verified leaf cert. +func validCert(ck certKey, der [][]byte, key crypto.Signer, now time.Time) (leaf *x509.Certificate, err error) { + // parse public part(s) + var n int + for _, b := range der { + n += len(b) + } + pub := make([]byte, n) + n = 0 + for _, b := range der { + n += copy(pub[n:], b) + } + x509Cert, err := x509.ParseCertificates(pub) + if err != nil || len(x509Cert) == 0 { + return nil, errors.New("acme/autocert: no public key found") + } + // verify the leaf is not expired and matches the domain name + leaf = x509Cert[0] + if now.Before(leaf.NotBefore) { + return nil, errors.New("acme/autocert: certificate is not valid yet") + } + if now.After(leaf.NotAfter) { + return nil, errors.New("acme/autocert: expired certificate") + } + if err := leaf.VerifyHostname(ck.domain); err != nil { + return nil, err + } + // ensure the leaf corresponds to the private key and matches the certKey type + switch pub := leaf.PublicKey.(type) { + case *rsa.PublicKey: + prv, ok := key.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("acme/autocert: private key type does not match public key type") + } + if pub.N.Cmp(prv.N) != 0 { + return nil, errors.New("acme/autocert: private key does not match public key") + } + if !ck.isRSA && !ck.isToken { + return nil, errors.New("acme/autocert: key type does not match expected value") + } + case *ecdsa.PublicKey: + prv, ok := key.(*ecdsa.PrivateKey) + if !ok { + return nil, errors.New("acme/autocert: private key type does not match public key type") + } + if pub.X.Cmp(prv.X) != 0 || pub.Y.Cmp(prv.Y) != 0 { + return nil, errors.New("acme/autocert: private key does not match public key") + } + if ck.isRSA && !ck.isToken { + return nil, errors.New("acme/autocert: key type does not match expected value") + } + default: + return nil, errors.New("acme/autocert: unknown public key algorithm") + } + return leaf, nil +} + +type lockedMathRand struct { + sync.Mutex + rnd *mathrand.Rand +} + +func (r *lockedMathRand) int63n(max int64) int64 { + r.Lock() + n := r.rnd.Int63n(max) + r.Unlock() + return n +} + +// For easier testing. +var ( + // Called when a state is removed. + testDidRemoveState = func(certKey) {} +) diff --git a/vendor/golang.org/x/crypto/acme/autocert/cache.go b/vendor/golang.org/x/crypto/acme/autocert/cache.go new file mode 100644 index 0000000..aa9aa84 --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/autocert/cache.go @@ -0,0 +1,130 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package autocert + +import ( + "context" + "errors" + "io/ioutil" + "os" + "path/filepath" +) + +// ErrCacheMiss is returned when a certificate is not found in cache. +var ErrCacheMiss = errors.New("acme/autocert: certificate cache miss") + +// Cache is used by Manager to store and retrieve previously obtained certificates +// and other account data as opaque blobs. +// +// Cache implementations should not rely on the key naming pattern. Keys can +// include any printable ASCII characters, except the following: \/:*?"<>| +type Cache interface { + // Get returns a certificate data for the specified key. + // If there's no such key, Get returns ErrCacheMiss. + Get(ctx context.Context, key string) ([]byte, error) + + // Put stores the data in the cache under the specified key. + // Underlying implementations may use any data storage format, + // as long as the reverse operation, Get, results in the original data. + Put(ctx context.Context, key string, data []byte) error + + // Delete removes a certificate data from the cache under the specified key. + // If there's no such key in the cache, Delete returns nil. + Delete(ctx context.Context, key string) error +} + +// DirCache implements Cache using a directory on the local filesystem. +// If the directory does not exist, it will be created with 0700 permissions. +type DirCache string + +// Get reads a certificate data from the specified file name. +func (d DirCache) Get(ctx context.Context, name string) ([]byte, error) { + name = filepath.Join(string(d), name) + var ( + data []byte + err error + done = make(chan struct{}) + ) + go func() { + data, err = ioutil.ReadFile(name) + close(done) + }() + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-done: + } + if os.IsNotExist(err) { + return nil, ErrCacheMiss + } + return data, err +} + +// Put writes the certificate data to the specified file name. +// The file will be created with 0600 permissions. +func (d DirCache) Put(ctx context.Context, name string, data []byte) error { + if err := os.MkdirAll(string(d), 0700); err != nil { + return err + } + + done := make(chan struct{}) + var err error + go func() { + defer close(done) + var tmp string + if tmp, err = d.writeTempFile(name, data); err != nil { + return + } + select { + case <-ctx.Done(): + // Don't overwrite the file if the context was canceled. + default: + newName := filepath.Join(string(d), name) + err = os.Rename(tmp, newName) + } + }() + select { + case <-ctx.Done(): + return ctx.Err() + case <-done: + } + return err +} + +// Delete removes the specified file name. +func (d DirCache) Delete(ctx context.Context, name string) error { + name = filepath.Join(string(d), name) + var ( + err error + done = make(chan struct{}) + ) + go func() { + err = os.Remove(name) + close(done) + }() + select { + case <-ctx.Done(): + return ctx.Err() + case <-done: + } + if err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// writeTempFile writes b to a temporary file, closes the file and returns its path. 
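+// Put then renames the temporary file into place, so a partially written
+// file never replaces an existing cache entry.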
+func (d DirCache) writeTempFile(prefix string, b []byte) (string, error) { + // TempFile uses 0600 permissions + f, err := ioutil.TempFile(string(d), prefix) + if err != nil { + return "", err + } + if _, err := f.Write(b); err != nil { + f.Close() + return "", err + } + return f.Name(), f.Close() +} diff --git a/vendor/golang.org/x/crypto/acme/autocert/listener.go b/vendor/golang.org/x/crypto/acme/autocert/listener.go new file mode 100644 index 0000000..1e06981 --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/autocert/listener.go @@ -0,0 +1,157 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package autocert + +import ( + "crypto/tls" + "log" + "net" + "os" + "path/filepath" + "runtime" + "time" +) + +// NewListener returns a net.Listener that listens on the standard TLS +// port (443) on all interfaces and returns *tls.Conn connections with +// LetsEncrypt certificates for the provided domain or domains. +// +// It enables one-line HTTPS servers: +// +// log.Fatal(http.Serve(autocert.NewListener("example.com"), handler)) +// +// NewListener is a convenience function for a common configuration. +// More complex or custom configurations can use the autocert.Manager +// type instead. +// +// Use of this function implies acceptance of the LetsEncrypt Terms of +// Service. If domains is not empty, the provided domains are passed +// to HostWhitelist. If domains is empty, the listener will do +// LetsEncrypt challenges for any requested domain, which is not +// recommended. +// +// Certificates are cached in a "golang-autocert" directory under an +// operating system-specific cache or temp directory. This may not +// be suitable for servers spanning multiple machines. +// +// The returned listener uses a *tls.Config that enables HTTP/2, and +// should only be used with servers that support HTTP/2. +// +// The returned Listener also enables TCP keep-alives on the accepted +// connections. The returned *tls.Conn are returned before their TLS +// handshake has completed. +func NewListener(domains ...string) net.Listener { + m := &Manager{ + Prompt: AcceptTOS, + } + if len(domains) > 0 { + m.HostPolicy = HostWhitelist(domains...) + } + dir := cacheDir() + if err := os.MkdirAll(dir, 0700); err != nil { + log.Printf("warning: autocert.NewListener not using a cache: %v", err) + } else { + m.Cache = DirCache(dir) + } + return m.Listener() +} + +// Listener listens on the standard TLS port (443) on all interfaces +// and returns a net.Listener returning *tls.Conn connections. +// +// The returned listener uses a *tls.Config that enables HTTP/2, and +// should only be used with servers that support HTTP/2. +// +// The returned Listener also enables TCP keep-alives on the accepted +// connections. The returned *tls.Conn are returned before their TLS +// handshake has completed. +// +// Unlike NewListener, it is the caller's responsibility to initialize +// the Manager m's Prompt, Cache, HostPolicy, and other desired options. 
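+// For example (a sketch; handler is an assumed http.Handler and the cache
+// directory path is arbitrary):
+//
+//	m := &autocert.Manager{
+//		Prompt:     autocert.AcceptTOS,
+//		Cache:      autocert.DirCache("/var/cache/autocert"),
+//		HostPolicy: autocert.HostWhitelist("example.org"),
+//	}
+//	log.Fatal(http.Serve(m.Listener(), handler))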
+func (m *Manager) Listener() net.Listener { + ln := &listener{ + m: m, + conf: m.TLSConfig(), + } + ln.tcpListener, ln.tcpListenErr = net.Listen("tcp", ":443") + return ln +} + +type listener struct { + m *Manager + conf *tls.Config + + tcpListener net.Listener + tcpListenErr error +} + +func (ln *listener) Accept() (net.Conn, error) { + if ln.tcpListenErr != nil { + return nil, ln.tcpListenErr + } + conn, err := ln.tcpListener.Accept() + if err != nil { + return nil, err + } + tcpConn := conn.(*net.TCPConn) + + // Because Listener is a convenience function, help out with + // this too. This is not possible for the caller to set once + // we return a *tcp.Conn wrapping an inaccessible net.Conn. + // If callers don't want this, they can do things the manual + // way and tweak as needed. But this is what net/http does + // itself, so copy that. If net/http changes, we can change + // here too. + tcpConn.SetKeepAlive(true) + tcpConn.SetKeepAlivePeriod(3 * time.Minute) + + return tls.Server(tcpConn, ln.conf), nil +} + +func (ln *listener) Addr() net.Addr { + if ln.tcpListener != nil { + return ln.tcpListener.Addr() + } + // net.Listen failed. Return something non-nil in case callers + // call Addr before Accept: + return &net.TCPAddr{IP: net.IP{0, 0, 0, 0}, Port: 443} +} + +func (ln *listener) Close() error { + if ln.tcpListenErr != nil { + return ln.tcpListenErr + } + return ln.tcpListener.Close() +} + +func homeDir() string { + if runtime.GOOS == "windows" { + return os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH") + } + if h := os.Getenv("HOME"); h != "" { + return h + } + return "/" +} + +func cacheDir() string { + const base = "golang-autocert" + switch runtime.GOOS { + case "darwin": + return filepath.Join(homeDir(), "Library", "Caches", base) + case "windows": + for _, ev := range []string{"APPDATA", "CSIDL_APPDATA", "TEMP", "TMP"} { + if v := os.Getenv(ev); v != "" { + return filepath.Join(v, base) + } + } + // Worst case: + return filepath.Join(homeDir(), base) + } + if xdg := os.Getenv("XDG_CACHE_HOME"); xdg != "" { + return filepath.Join(xdg, base) + } + return filepath.Join(homeDir(), ".cache", base) +} diff --git a/vendor/golang.org/x/crypto/acme/autocert/renewal.go b/vendor/golang.org/x/crypto/acme/autocert/renewal.go new file mode 100644 index 0000000..665f870 --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/autocert/renewal.go @@ -0,0 +1,141 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package autocert + +import ( + "context" + "crypto" + "sync" + "time" +) + +// renewJitter is the maximum deviation from Manager.RenewBefore. +const renewJitter = time.Hour + +// domainRenewal tracks the state used by the periodic timers +// renewing a single domain's cert. +type domainRenewal struct { + m *Manager + ck certKey + key crypto.Signer + + timerMu sync.Mutex + timer *time.Timer +} + +// start starts a cert renewal timer at the time +// defined by the certificate expiration time exp. +// +// If the timer is already started, calling start is a noop. +func (dr *domainRenewal) start(exp time.Time) { + dr.timerMu.Lock() + defer dr.timerMu.Unlock() + if dr.timer != nil { + return + } + dr.timer = time.AfterFunc(dr.next(exp), dr.renew) +} + +// stop stops the cert renewal timer. +// If the timer is already stopped, calling stop is a noop. 
+func (dr *domainRenewal) stop() { + dr.timerMu.Lock() + defer dr.timerMu.Unlock() + if dr.timer == nil { + return + } + dr.timer.Stop() + dr.timer = nil +} + +// renew is called periodically by a timer. +// The first renew call is kicked off by dr.start. +func (dr *domainRenewal) renew() { + dr.timerMu.Lock() + defer dr.timerMu.Unlock() + if dr.timer == nil { + return + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer cancel() + // TODO: rotate dr.key at some point? + next, err := dr.do(ctx) + if err != nil { + next = renewJitter / 2 + next += time.Duration(pseudoRand.int63n(int64(next))) + } + dr.timer = time.AfterFunc(next, dr.renew) + testDidRenewLoop(next, err) +} + +// updateState locks and replaces the relevant Manager.state item with the given +// state. It additionally updates dr.key with the given state's key. +func (dr *domainRenewal) updateState(state *certState) { + dr.m.stateMu.Lock() + defer dr.m.stateMu.Unlock() + dr.key = state.key + dr.m.state[dr.ck] = state +} + +// do is similar to Manager.createCert but it doesn't lock a Manager.state item. +// Instead, it requests a new certificate independently and, upon success, +// replaces dr.m.state item with a new one and updates cache for the given domain. +// +// It may lock and update the Manager.state if the expiration date of the currently +// cached cert is far enough in the future. +// +// The returned value is a time interval after which the renewal should occur again. +func (dr *domainRenewal) do(ctx context.Context) (time.Duration, error) { + // a race is likely unavoidable in a distributed environment + // but we try nonetheless + if tlscert, err := dr.m.cacheGet(ctx, dr.ck); err == nil { + next := dr.next(tlscert.Leaf.NotAfter) + if next > dr.m.renewBefore()+renewJitter { + signer, ok := tlscert.PrivateKey.(crypto.Signer) + if ok { + state := &certState{ + key: signer, + cert: tlscert.Certificate, + leaf: tlscert.Leaf, + } + dr.updateState(state) + return next, nil + } + } + } + + der, leaf, err := dr.m.authorizedCert(ctx, dr.key, dr.ck) + if err != nil { + return 0, err + } + state := &certState{ + key: dr.key, + cert: der, + leaf: leaf, + } + tlscert, err := state.tlscert() + if err != nil { + return 0, err + } + if err := dr.m.cachePut(ctx, dr.ck, tlscert); err != nil { + return 0, err + } + dr.updateState(state) + return dr.next(leaf.NotAfter), nil +} + +func (dr *domainRenewal) next(expiry time.Time) time.Duration { + d := expiry.Sub(dr.m.now()) - dr.m.renewBefore() + // add a bit of randomness to renew deadline + n := pseudoRand.int63n(int64(renewJitter)) + d -= time.Duration(n) + if d < 0 { + return 0 + } + return d +} + +var testDidRenewLoop = func(next time.Duration, err error) {} diff --git a/vendor/golang.org/x/crypto/acme/http.go b/vendor/golang.org/x/crypto/acme/http.go new file mode 100644 index 0000000..a43ce6a --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/http.go @@ -0,0 +1,281 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package acme + +import ( + "bytes" + "context" + "crypto" + "crypto/rand" + "encoding/json" + "fmt" + "io/ioutil" + "math/big" + "net/http" + "strconv" + "strings" + "time" +) + +// retryTimer encapsulates common logic for retrying unsuccessful requests. +// It is not safe for concurrent use. +type retryTimer struct { + // backoffFn provides backoff delay sequence for retries. + // See Client.RetryBackoff doc comment. 
+	backoffFn func(n int, r *http.Request, res *http.Response) time.Duration
+	// n is the current retry attempt.
+	n int
+}
+
+func (t *retryTimer) inc() {
+	t.n++
+}
+
+// backoff pauses the current goroutine as described in Client.RetryBackoff.
+func (t *retryTimer) backoff(ctx context.Context, r *http.Request, res *http.Response) error {
+	d := t.backoffFn(t.n, r, res)
+	if d <= 0 {
+		return fmt.Errorf("acme: no more retries for %s; tried %d time(s)", r.URL, t.n)
+	}
+	wakeup := time.NewTimer(d)
+	defer wakeup.Stop()
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-wakeup.C:
+		return nil
+	}
+}
+
+func (c *Client) retryTimer() *retryTimer {
+	f := c.RetryBackoff
+	if f == nil {
+		f = defaultBackoff
+	}
+	return &retryTimer{backoffFn: f}
+}
+
+// defaultBackoff provides default Client.RetryBackoff implementation
+// using a truncated exponential backoff algorithm,
+// as described in Client.RetryBackoff.
+//
+// The n argument is always bounded between 1 and 30.
+// The returned value is always greater than 0.
+func defaultBackoff(n int, r *http.Request, res *http.Response) time.Duration {
+	const max = 10 * time.Second
+	var jitter time.Duration
+	if x, err := rand.Int(rand.Reader, big.NewInt(1000)); err == nil {
+		// Set the minimum to 1ms to avoid a case where
+		// an invalid Retry-After value is parsed into 0 below,
+		// resulting in the 0 returned value which would unintentionally
+		// stop the retries.
+		jitter = (1 + time.Duration(x.Int64())) * time.Millisecond
+	}
+	if v, ok := res.Header["Retry-After"]; ok {
+		return retryAfter(v[0]) + jitter
+	}
+
+	if n < 1 {
+		n = 1
+	}
+	if n > 30 {
+		n = 30
+	}
+	d := time.Duration(1<<uint(n-1))*time.Second + jitter
+	if d > max {
+		return max
+	}
+	return d
+}
+
+// retryAfter parses a Retry-After HTTP header value,
+// trying to convert v into an int (seconds) or use http.ParseTime otherwise.
+// It returns zero value if v cannot be parsed.
+func retryAfter(v string) time.Duration {
+	if i, err := strconv.Atoi(v); err == nil {
+		return time.Duration(i) * time.Second
+	}
+	t, err := http.ParseTime(v)
+	if err != nil {
+		return 0
+	}
+	return t.Sub(timeNow())
+}
+
+// resOkay is a function that reports whether the provided response is okay.
+// It is expected to keep the response body unread.
+type resOkay func(*http.Response) bool
+
+// wantStatus returns a function which reports whether the code
+// matches the status code of a response.
+func wantStatus(codes ...int) resOkay {
+	return func(res *http.Response) bool {
+		for _, code := range codes {
+			if code == res.StatusCode {
+				return true
+			}
+		}
+		return false
+	}
+}
+
+// get issues an unsigned GET request to the specified URL.
+// It returns a non-error value only when ok reports true.
+//
+// get retries unsuccessful attempts according to c.RetryBackoff
+// until the context is done or a non-retriable error is received.
+func (c *Client) get(ctx context.Context, url string, ok resOkay) (*http.Response, error) {
+	retry := c.retryTimer()
+	for {
+		req, err := http.NewRequest("GET", url, nil)
+		if err != nil {
+			return nil, err
+		}
+		res, err := c.doNoRetry(ctx, req)
+		switch {
+		case err != nil:
+			return nil, err
+		case ok(res):
+			return res, nil
+		case isRetriable(res.StatusCode):
+			retry.inc()
+			resErr := responseError(res)
+			res.Body.Close()
+			// Ignore the error value from retry.backoff
+			// and return the one from last retry, as received from the CA.
+ if retry.backoff(ctx, req, res) != nil { + return nil, resErr + } + default: + defer res.Body.Close() + return nil, responseError(res) + } + } +} + +// post issues a signed POST request in JWS format using the provided key +// to the specified URL. +// It returns a non-error value only when ok reports true. +// +// post retries unsuccessful attempts according to c.RetryBackoff +// until the context is done or a non-retriable error is received. +// It uses postNoRetry to make individual requests. +func (c *Client) post(ctx context.Context, key crypto.Signer, url string, body interface{}, ok resOkay) (*http.Response, error) { + retry := c.retryTimer() + for { + res, req, err := c.postNoRetry(ctx, key, url, body) + if err != nil { + return nil, err + } + if ok(res) { + return res, nil + } + resErr := responseError(res) + res.Body.Close() + switch { + // Check for bad nonce before isRetriable because it may have been returned + // with an unretriable response code such as 400 Bad Request. + case isBadNonce(resErr): + // Consider any previously stored nonce values to be invalid. + c.clearNonces() + case !isRetriable(res.StatusCode): + return nil, resErr + } + retry.inc() + // Ignore the error value from retry.backoff + // and return the one from last retry, as received from the CA. + if err := retry.backoff(ctx, req, res); err != nil { + return nil, resErr + } + } +} + +// postNoRetry signs the body with the given key and POSTs it to the provided url. +// The body argument must be JSON-serializable. +// It is used by c.post to retry unsuccessful attempts. +func (c *Client) postNoRetry(ctx context.Context, key crypto.Signer, url string, body interface{}) (*http.Response, *http.Request, error) { + nonce, err := c.popNonce(ctx, url) + if err != nil { + return nil, nil, err + } + b, err := jwsEncodeJSON(body, key, nonce) + if err != nil { + return nil, nil, err + } + req, err := http.NewRequest("POST", url, bytes.NewReader(b)) + if err != nil { + return nil, nil, err + } + req.Header.Set("Content-Type", "application/jose+json") + res, err := c.doNoRetry(ctx, req) + if err != nil { + return nil, nil, err + } + c.addNonce(res.Header) + return res, req, nil +} + +// doNoRetry issues a request req, replacing its context (if any) with ctx. +func (c *Client) doNoRetry(ctx context.Context, req *http.Request) (*http.Response, error) { + res, err := c.httpClient().Do(req.WithContext(ctx)) + if err != nil { + select { + case <-ctx.Done(): + // Prefer the unadorned context error. + // (The acme package had tests assuming this, previously from ctxhttp's + // behavior, predating net/http supporting contexts natively) + // TODO(bradfitz): reconsider this in the future. But for now this + // requires no test updates. + return nil, ctx.Err() + default: + return nil, err + } + } + return res, nil +} + +func (c *Client) httpClient() *http.Client { + if c.HTTPClient != nil { + return c.HTTPClient + } + return http.DefaultClient +} + +// isBadNonce reports whether err is an ACME "badnonce" error. +func isBadNonce(err error) bool { + // According to the spec badNonce is urn:ietf:params:acme:error:badNonce. + // However, ACME servers in the wild return their versions of the error. + // See https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-5.4 + // and https://github.com/letsencrypt/boulder/blob/0e07eacb/docs/acme-divergences.md#section-66. 
+ ae, ok := err.(*Error) + return ok && strings.HasSuffix(strings.ToLower(ae.ProblemType), ":badnonce") +} + +// isRetriable reports whether a request can be retried +// based on the response status code. +// +// Note that a "bad nonce" error is returned with a non-retriable 400 Bad Request code. +// Callers should parse the response and check with isBadNonce. +func isRetriable(code int) bool { + return code <= 399 || code >= 500 || code == http.StatusTooManyRequests +} + +// responseError creates an error of Error type from resp. +func responseError(resp *http.Response) error { + // don't care if ReadAll returns an error: + // json.Unmarshal will fail in that case anyway + b, _ := ioutil.ReadAll(resp.Body) + e := &wireError{Status: resp.StatusCode} + if err := json.Unmarshal(b, e); err != nil { + // this is not a regular error response: + // populate detail with anything we received, + // e.Status will already contain HTTP response code value + e.Detail = string(b) + if e.Detail == "" { + e.Detail = resp.Status + } + } + return e.error(resp.Header) +} diff --git a/vendor/golang.org/x/crypto/acme/jws.go b/vendor/golang.org/x/crypto/acme/jws.go new file mode 100644 index 0000000..6cbca25 --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/jws.go @@ -0,0 +1,153 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package acme + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + _ "crypto/sha512" // need for EC keys + "encoding/base64" + "encoding/json" + "fmt" + "math/big" +) + +// jwsEncodeJSON signs claimset using provided key and a nonce. +// The result is serialized in JSON format. +// See https://tools.ietf.org/html/rfc7515#section-7. +func jwsEncodeJSON(claimset interface{}, key crypto.Signer, nonce string) ([]byte, error) { + jwk, err := jwkEncode(key.Public()) + if err != nil { + return nil, err + } + alg, sha := jwsHasher(key) + if alg == "" || !sha.Available() { + return nil, ErrUnsupportedKey + } + phead := fmt.Sprintf(`{"alg":%q,"jwk":%s,"nonce":%q}`, alg, jwk, nonce) + phead = base64.RawURLEncoding.EncodeToString([]byte(phead)) + cs, err := json.Marshal(claimset) + if err != nil { + return nil, err + } + payload := base64.RawURLEncoding.EncodeToString(cs) + hash := sha.New() + hash.Write([]byte(phead + "." + payload)) + sig, err := jwsSign(key, sha, hash.Sum(nil)) + if err != nil { + return nil, err + } + + enc := struct { + Protected string `json:"protected"` + Payload string `json:"payload"` + Sig string `json:"signature"` + }{ + Protected: phead, + Payload: payload, + Sig: base64.RawURLEncoding.EncodeToString(sig), + } + return json.Marshal(&enc) +} + +// jwkEncode encodes public part of an RSA or ECDSA key into a JWK. +// The result is also suitable for creating a JWK thumbprint. +// https://tools.ietf.org/html/rfc7517 +func jwkEncode(pub crypto.PublicKey) (string, error) { + switch pub := pub.(type) { + case *rsa.PublicKey: + // https://tools.ietf.org/html/rfc7518#section-6.3.1 + n := pub.N + e := big.NewInt(int64(pub.E)) + // Field order is important. + // See https://tools.ietf.org/html/rfc7638#section-3.3 for details. 
+ return fmt.Sprintf(`{"e":"%s","kty":"RSA","n":"%s"}`, + base64.RawURLEncoding.EncodeToString(e.Bytes()), + base64.RawURLEncoding.EncodeToString(n.Bytes()), + ), nil + case *ecdsa.PublicKey: + // https://tools.ietf.org/html/rfc7518#section-6.2.1 + p := pub.Curve.Params() + n := p.BitSize / 8 + if p.BitSize%8 != 0 { + n++ + } + x := pub.X.Bytes() + if n > len(x) { + x = append(make([]byte, n-len(x)), x...) + } + y := pub.Y.Bytes() + if n > len(y) { + y = append(make([]byte, n-len(y)), y...) + } + // Field order is important. + // See https://tools.ietf.org/html/rfc7638#section-3.3 for details. + return fmt.Sprintf(`{"crv":"%s","kty":"EC","x":"%s","y":"%s"}`, + p.Name, + base64.RawURLEncoding.EncodeToString(x), + base64.RawURLEncoding.EncodeToString(y), + ), nil + } + return "", ErrUnsupportedKey +} + +// jwsSign signs the digest using the given key. +// It returns ErrUnsupportedKey if the key type is unknown. +// The hash is used only for RSA keys. +func jwsSign(key crypto.Signer, hash crypto.Hash, digest []byte) ([]byte, error) { + switch key := key.(type) { + case *rsa.PrivateKey: + return key.Sign(rand.Reader, digest, hash) + case *ecdsa.PrivateKey: + r, s, err := ecdsa.Sign(rand.Reader, key, digest) + if err != nil { + return nil, err + } + rb, sb := r.Bytes(), s.Bytes() + size := key.Params().BitSize / 8 + if size%8 > 0 { + size++ + } + sig := make([]byte, size*2) + copy(sig[size-len(rb):], rb) + copy(sig[size*2-len(sb):], sb) + return sig, nil + } + return nil, ErrUnsupportedKey +} + +// jwsHasher indicates suitable JWS algorithm name and a hash function +// to use for signing a digest with the provided key. +// It returns ("", 0) if the key is not supported. +func jwsHasher(key crypto.Signer) (string, crypto.Hash) { + switch key := key.(type) { + case *rsa.PrivateKey: + return "RS256", crypto.SHA256 + case *ecdsa.PrivateKey: + switch key.Params().Name { + case "P-256": + return "ES256", crypto.SHA256 + case "P-384": + return "ES384", crypto.SHA384 + case "P-521": + return "ES512", crypto.SHA512 + } + } + return "", 0 +} + +// JWKThumbprint creates a JWK thumbprint out of pub +// as specified in https://tools.ietf.org/html/rfc7638. +func JWKThumbprint(pub crypto.PublicKey) (string, error) { + jwk, err := jwkEncode(pub) + if err != nil { + return "", err + } + b := sha256.Sum256([]byte(jwk)) + return base64.RawURLEncoding.EncodeToString(b[:]), nil +} diff --git a/vendor/golang.org/x/crypto/acme/types.go b/vendor/golang.org/x/crypto/acme/types.go new file mode 100644 index 0000000..54792c0 --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/types.go @@ -0,0 +1,329 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package acme + +import ( + "crypto" + "crypto/x509" + "errors" + "fmt" + "net/http" + "strings" + "time" +) + +// ACME server response statuses used to describe Authorization and Challenge states. +const ( + StatusUnknown = "unknown" + StatusPending = "pending" + StatusProcessing = "processing" + StatusValid = "valid" + StatusInvalid = "invalid" + StatusRevoked = "revoked" +) + +// CRLReasonCode identifies the reason for a certificate revocation. +type CRLReasonCode int + +// CRL reason codes as defined in RFC 5280. 
+const ( + CRLReasonUnspecified CRLReasonCode = 0 + CRLReasonKeyCompromise CRLReasonCode = 1 + CRLReasonCACompromise CRLReasonCode = 2 + CRLReasonAffiliationChanged CRLReasonCode = 3 + CRLReasonSuperseded CRLReasonCode = 4 + CRLReasonCessationOfOperation CRLReasonCode = 5 + CRLReasonCertificateHold CRLReasonCode = 6 + CRLReasonRemoveFromCRL CRLReasonCode = 8 + CRLReasonPrivilegeWithdrawn CRLReasonCode = 9 + CRLReasonAACompromise CRLReasonCode = 10 +) + +// ErrUnsupportedKey is returned when an unsupported key type is encountered. +var ErrUnsupportedKey = errors.New("acme: unknown key type; only RSA and ECDSA are supported") + +// Error is an ACME error, defined in Problem Details for HTTP APIs doc +// http://tools.ietf.org/html/draft-ietf-appsawg-http-problem. +type Error struct { + // StatusCode is The HTTP status code generated by the origin server. + StatusCode int + // ProblemType is a URI reference that identifies the problem type, + // typically in a "urn:acme:error:xxx" form. + ProblemType string + // Detail is a human-readable explanation specific to this occurrence of the problem. + Detail string + // Header is the original server error response headers. + // It may be nil. + Header http.Header +} + +func (e *Error) Error() string { + return fmt.Sprintf("%d %s: %s", e.StatusCode, e.ProblemType, e.Detail) +} + +// AuthorizationError indicates that an authorization for an identifier +// did not succeed. +// It contains all errors from Challenge items of the failed Authorization. +type AuthorizationError struct { + // URI uniquely identifies the failed Authorization. + URI string + + // Identifier is an AuthzID.Value of the failed Authorization. + Identifier string + + // Errors is a collection of non-nil error values of Challenge items + // of the failed Authorization. + Errors []error +} + +func (a *AuthorizationError) Error() string { + e := make([]string, len(a.Errors)) + for i, err := range a.Errors { + e[i] = err.Error() + } + return fmt.Sprintf("acme: authorization error for %s: %s", a.Identifier, strings.Join(e, "; ")) +} + +// RateLimit reports whether err represents a rate limit error and +// any Retry-After duration returned by the server. +// +// See the following for more details on rate limiting: +// https://tools.ietf.org/html/draft-ietf-acme-acme-05#section-5.6 +func RateLimit(err error) (time.Duration, bool) { + e, ok := err.(*Error) + if !ok { + return 0, false + } + // Some CA implementations may return incorrect values. + // Use case-insensitive comparison. + if !strings.HasSuffix(strings.ToLower(e.ProblemType), ":ratelimited") { + return 0, false + } + if e.Header == nil { + return 0, true + } + return retryAfter(e.Header.Get("Retry-After")), true +} + +// Account is a user account. It is associated with a private key. +type Account struct { + // URI is the account unique ID, which is also a URL used to retrieve + // account data from the CA. + URI string + + // Contact is a slice of contact info used during registration. + Contact []string + + // The terms user has agreed to. + // A value not matching CurrentTerms indicates that the user hasn't agreed + // to the actual Terms of Service of the CA. + AgreedTerms string + + // Actual terms of a CA. + CurrentTerms string + + // Authz is the authorization URL used to initiate a new authz flow. + Authz string + + // Authorizations is a URI from which a list of authorizations + // granted to this account can be fetched via a GET request. 
+ Authorizations string + + // Certificates is a URI from which a list of certificates + // issued for this account can be fetched via a GET request. + Certificates string +} + +// Directory is ACME server discovery data. +type Directory struct { + // RegURL is an account endpoint URL, allowing for creating new + // and modifying existing accounts. + RegURL string + + // AuthzURL is used to initiate Identifier Authorization flow. + AuthzURL string + + // CertURL is a new certificate issuance endpoint URL. + CertURL string + + // RevokeURL is used to initiate a certificate revocation flow. + RevokeURL string + + // Term is a URI identifying the current terms of service. + Terms string + + // Website is an HTTP or HTTPS URL locating a website + // providing more information about the ACME server. + Website string + + // CAA consists of lowercase hostname elements, which the ACME server + // recognises as referring to itself for the purposes of CAA record validation + // as defined in RFC6844. + CAA []string +} + +// Challenge encodes a returned CA challenge. +// Its Error field may be non-nil if the challenge is part of an Authorization +// with StatusInvalid. +type Challenge struct { + // Type is the challenge type, e.g. "http-01", "tls-sni-02", "dns-01". + Type string + + // URI is where a challenge response can be posted to. + URI string + + // Token is a random value that uniquely identifies the challenge. + Token string + + // Status identifies the status of this challenge. + Status string + + // Error indicates the reason for an authorization failure + // when this challenge was used. + // The type of a non-nil value is *Error. + Error error +} + +// Authorization encodes an authorization response. +type Authorization struct { + // URI uniquely identifies a authorization. + URI string + + // Status identifies the status of an authorization. + Status string + + // Identifier is what the account is authorized to represent. + Identifier AuthzID + + // Challenges that the client needs to fulfill in order to prove possession + // of the identifier (for pending authorizations). + // For final authorizations, the challenges that were used. + Challenges []*Challenge + + // A collection of sets of challenges, each of which would be sufficient + // to prove possession of the identifier. + // Clients must complete a set of challenges that covers at least one set. + // Challenges are identified by their indices in the challenges array. + // If this field is empty, the client needs to complete all challenges. + Combinations [][]int +} + +// AuthzID is an identifier that an account is authorized to represent. +type AuthzID struct { + Type string // The type of identifier, e.g. "dns". + Value string // The identifier itself, e.g. "example.org". +} + +// wireAuthz is ACME JSON representation of Authorization objects. 
+type wireAuthz struct { + Status string + Challenges []wireChallenge + Combinations [][]int + Identifier struct { + Type string + Value string + } +} + +func (z *wireAuthz) authorization(uri string) *Authorization { + a := &Authorization{ + URI: uri, + Status: z.Status, + Identifier: AuthzID{Type: z.Identifier.Type, Value: z.Identifier.Value}, + Combinations: z.Combinations, // shallow copy + Challenges: make([]*Challenge, len(z.Challenges)), + } + for i, v := range z.Challenges { + a.Challenges[i] = v.challenge() + } + return a +} + +func (z *wireAuthz) error(uri string) *AuthorizationError { + err := &AuthorizationError{ + URI: uri, + Identifier: z.Identifier.Value, + } + for _, raw := range z.Challenges { + if raw.Error != nil { + err.Errors = append(err.Errors, raw.Error.error(nil)) + } + } + return err +} + +// wireChallenge is ACME JSON challenge representation. +type wireChallenge struct { + URI string `json:"uri"` + Type string + Token string + Status string + Error *wireError +} + +func (c *wireChallenge) challenge() *Challenge { + v := &Challenge{ + URI: c.URI, + Type: c.Type, + Token: c.Token, + Status: c.Status, + } + if v.Status == "" { + v.Status = StatusPending + } + if c.Error != nil { + v.Error = c.Error.error(nil) + } + return v +} + +// wireError is a subset of fields of the Problem Details object +// as described in https://tools.ietf.org/html/rfc7807#section-3.1. +type wireError struct { + Status int + Type string + Detail string +} + +func (e *wireError) error(h http.Header) *Error { + return &Error{ + StatusCode: e.Status, + ProblemType: e.Type, + Detail: e.Detail, + Header: h, + } +} + +// CertOption is an optional argument type for the TLS ChallengeCert methods for +// customizing a temporary certificate for TLS-based challenges. +type CertOption interface { + privateCertOpt() +} + +// WithKey creates an option holding a private/public key pair. +// The private part signs a certificate, and the public part represents the signee. +func WithKey(key crypto.Signer) CertOption { + return &certOptKey{key} +} + +type certOptKey struct { + key crypto.Signer +} + +func (*certOptKey) privateCertOpt() {} + +// WithTemplate creates an option for specifying a certificate template. +// See x509.CreateCertificate for template usage details. +// +// In TLS ChallengeCert methods, the template is also used as parent, +// resulting in a self-signed certificate. +// The DNSNames field of t is always overwritten for tls-sni challenge certs. +func WithTemplate(t *x509.Certificate) CertOption { + return (*certOptTemplate)(t) +} + +type certOptTemplate x509.Certificate + +func (*certOptTemplate) privateCertOpt() {} diff --git a/vendor/golang.org/x/net/AUTHORS b/vendor/golang.org/x/net/AUTHORS new file mode 100644 index 0000000..15167cd --- /dev/null +++ b/vendor/golang.org/x/net/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/net/CONTRIBUTORS b/vendor/golang.org/x/net/CONTRIBUTORS new file mode 100644 index 0000000..1c4577e --- /dev/null +++ b/vendor/golang.org/x/net/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. 
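The exported helpers in the acme package vendored above are small enough to exercise on their own. The following standalone sketch (not part of the vendored sources) computes an RFC 7638 thumbprint of a freshly generated account key with acme.JWKThumbprint and inspects a CA error with acme.RateLimit; the acme.Error literal is constructed by hand purely for illustration, standing in for an error value returned by an acme.Client call.

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
	"log"
	"time"

	"golang.org/x/crypto/acme"
)

func main() {
	// Generate an ECDSA P-256 account key; jwkEncode/jwsHasher above support it.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	// RFC 7638 thumbprint of the public key, as used in key authorizations.
	thumb, err := acme.JWKThumbprint(key.Public())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("account key thumbprint:", thumb)

	// A hand-built error value standing in for one returned by an acme.Client
	// call; RateLimit checks the problem type and any Retry-After header.
	var callErr error = &acme.Error{
		StatusCode:  429,
		ProblemType: "urn:acme:error:rateLimited",
	}
	if delay, ok := acme.RateLimit(callErr); ok {
		if delay == 0 {
			delay = time.Minute // no Retry-After header was present
		}
		fmt.Println("rate limited; retrying in", delay)
	}
}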
diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE new file mode 100644 index 0000000..6a66aea --- /dev/null +++ b/vendor/golang.org/x/net/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS new file mode 100644 index 0000000..7330990 --- /dev/null +++ b/vendor/golang.org/x/net/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go new file mode 100644 index 0000000..a3c021d --- /dev/null +++ b/vendor/golang.org/x/net/context/context.go @@ -0,0 +1,56 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package context defines the Context type, which carries deadlines, +// cancelation signals, and other request-scoped values across API boundaries +// and between processes. +// As of Go 1.7 this package is available in the standard library under the +// name context. https://golang.org/pkg/context. +// +// Incoming requests to a server should create a Context, and outgoing calls to +// servers should accept a Context. The chain of function calls between must +// propagate the Context, optionally replacing it with a modified copy created +// using WithDeadline, WithTimeout, WithCancel, or WithValue. +// +// Programs that use Contexts should follow these rules to keep interfaces +// consistent across packages and enable static analysis tools to check context +// propagation: +// +// Do not store Contexts inside a struct type; instead, pass a Context +// explicitly to each function that needs it. The Context should be the first +// parameter, typically named ctx: +// +// func DoSomething(ctx context.Context, arg Arg) error { +// // ... use ctx ... +// } +// +// Do not pass a nil Context, even if a function permits it. Pass context.TODO +// if you are unsure about which Context to use. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The same Context may be passed to functions running in different goroutines; +// Contexts are safe for simultaneous use by multiple goroutines. +// +// See http://blog.golang.org/context for example code for a server that uses +// Contexts. +package context // import "golang.org/x/net/context" + +// Background returns a non-nil, empty Context. It is never canceled, has no +// values, and has no deadline. It is typically used by the main function, +// initialization, and tests, and as the top-level Context for incoming +// requests. +func Background() Context { + return background +} + +// TODO returns a non-nil, empty Context. Code should use context.TODO when +// it's unclear which Context to use or it is not yet available (because the +// surrounding function has not yet been extended to accept a Context +// parameter). TODO is recognized by static analysis tools that determine +// whether Contexts are propagated correctly in a program. +func TODO() Context { + return todo +} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go new file mode 100644 index 0000000..d20f52b --- /dev/null +++ b/vendor/golang.org/x/net/context/go17.go @@ -0,0 +1,72 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package context + +import ( + "context" // standard library's context, as of Go 1.7 + "time" +) + +var ( + todo = context.TODO() + background = context.Background() +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = context.Canceled + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = context.DeadlineExceeded + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. 
+// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + ctx, f := context.WithCancel(parent) + return ctx, CancelFunc(f) +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + ctx, f := context.WithDeadline(parent, deadline) + return ctx, CancelFunc(f) +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return context.WithValue(parent, key, val) +} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go new file mode 100644 index 0000000..d88bd1d --- /dev/null +++ b/vendor/golang.org/x/net/context/go19.go @@ -0,0 +1,20 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package context + +import "context" // standard library's context, as of Go 1.7 + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context = context.Context + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go new file mode 100644 index 0000000..0f35592 --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go17.go @@ -0,0 +1,300 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package context + +import ( + "errors" + "fmt" + "sync" + "time" +) + +// An emptyCtx is never canceled, has no values, and has no deadline. 
It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case background: + return "context.Background" + case todo: + return "context.TODO" + } + return "unknown empty Context" +} + +var ( + background = new(emptyCtx) + todo = new(emptyCtx) +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = errors.New("context canceled") + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = errors.New("context deadline exceeded") + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + c := newCancelCtx(parent) + propagateCancel(parent, c) + return c, func() { c.cancel(true, Canceled) } +} + +// newCancelCtx returns an initialized cancelCtx. +func newCancelCtx(parent Context) *cancelCtx { + return &cancelCtx{ + Context: parent, + done: make(chan struct{}), + } +} + +// propagateCancel arranges for child to be canceled when parent is. +func propagateCancel(parent Context, child canceler) { + if parent.Done() == nil { + return // parent is never canceled + } + if p, ok := parentCancelCtx(parent); ok { + p.mu.Lock() + if p.err != nil { + // parent has already been canceled + child.cancel(false, p.err) + } else { + if p.children == nil { + p.children = make(map[canceler]bool) + } + p.children[child] = true + } + p.mu.Unlock() + } else { + go func() { + select { + case <-parent.Done(): + child.cancel(false, parent.Err()) + case <-child.Done(): + } + }() + } +} + +// parentCancelCtx follows a chain of parent references until it finds a +// *cancelCtx. This function understands how each of the concrete types in this +// package represents its parent. +func parentCancelCtx(parent Context) (*cancelCtx, bool) { + for { + switch c := parent.(type) { + case *cancelCtx: + return c, true + case *timerCtx: + return c.cancelCtx, true + case *valueCtx: + parent = c.Context + default: + return nil, false + } + } +} + +// removeChild removes a context from its parent. +func removeChild(parent Context, child canceler) { + p, ok := parentCancelCtx(parent) + if !ok { + return + } + p.mu.Lock() + if p.children != nil { + delete(p.children, child) + } + p.mu.Unlock() +} + +// A canceler is a context type that can be canceled directly. The +// implementations are *cancelCtx and *timerCtx. +type canceler interface { + cancel(removeFromParent bool, err error) + Done() <-chan struct{} +} + +// A cancelCtx can be canceled. When canceled, it also cancels any children +// that implement canceler. +type cancelCtx struct { + Context + + done chan struct{} // closed by the first cancel call. 
+ + mu sync.Mutex + children map[canceler]bool // set to nil by the first cancel call + err error // set to non-nil by the first cancel call +} + +func (c *cancelCtx) Done() <-chan struct{} { + return c.done +} + +func (c *cancelCtx) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *cancelCtx) String() string { + return fmt.Sprintf("%v.WithCancel", c.Context) +} + +// cancel closes c.done, cancels each of c's children, and, if +// removeFromParent is true, removes c from its parent's children. +func (c *cancelCtx) cancel(removeFromParent bool, err error) { + if err == nil { + panic("context: internal error: missing cancel error") + } + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return // already canceled + } + c.err = err + close(c.done) + for child := range c.children { + // NOTE: acquiring the child's lock while holding parent's lock. + child.cancel(false, err) + } + c.children = nil + c.mu.Unlock() + + if removeFromParent { + removeChild(c.Context, c) + } +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { + // The current deadline is already sooner than the new one. + return WithCancel(parent) + } + c := &timerCtx{ + cancelCtx: newCancelCtx(parent), + deadline: deadline, + } + propagateCancel(parent, c) + d := deadline.Sub(time.Now()) + if d <= 0 { + c.cancel(true, DeadlineExceeded) // deadline has already passed + return c, func() { c.cancel(true, Canceled) } + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.timer = time.AfterFunc(d, func() { + c.cancel(true, DeadlineExceeded) + }) + } + return c, func() { c.cancel(true, Canceled) } +} + +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then +// delegating to cancelCtx.cancel. +type timerCtx struct { + *cancelCtx + timer *time.Timer // Under cancelCtx.mu. + + deadline time.Time +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { + return c.deadline, true +} + +func (c *timerCtx) String() string { + return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) +} + +func (c *timerCtx) cancel(removeFromParent bool, err error) { + c.cancelCtx.cancel(false, err) + if removeFromParent { + // Remove this timerCtx from its parent cancelCtx's children. + removeChild(c.cancelCtx.Context, c) + } + c.mu.Lock() + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } + c.mu.Unlock() +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
+// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return &valueCtx{parent, key, val} +} + +// A valueCtx carries a key-value pair. It implements Value for that key and +// delegates all other calls to the embedded Context. +type valueCtx struct { + Context + key, val interface{} +} + +func (c *valueCtx) String() string { + return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) +} + +func (c *valueCtx) Value(key interface{}) interface{} { + if c.key == key { + return c.val + } + return c.Context.Value(key) +} diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go new file mode 100644 index 0000000..b105f80 --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go19.go @@ -0,0 +1,109 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package context + +import "time" + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + // + // WithCancel arranges for Done to be closed when cancel is called; + // WithDeadline arranges for Done to be closed when the deadline + // expires; WithTimeout arranges for Done to be closed when the timeout + // elapses. + // + // Done is provided for use in select statements: + // + // // Stream generates values with DoSomething and sends them to out + // // until DoSomething returns an error or ctx.Done is closed. + // func Stream(ctx context.Context, out chan<- Value) error { + // for { + // v, err := DoSomething(ctx) + // if err != nil { + // return err + // } + // select { + // case <-ctx.Done(): + // return ctx.Err() + // case out <- v: + // } + // } + // } + // + // See http://blog.golang.org/pipelines for more examples of how to use + // a Done channel for cancelation. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. 
+ // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + // + // A key identifies a specific value in a Context. Functions that wish + // to store values in Context typically allocate a key in a global + // variable then use that key as the argument to context.WithValue and + // Context.Value. A key can be any type that supports equality; + // packages should define keys as an unexported type to avoid + // collisions. + // + // Packages that define a Context key should provide type-safe accessors + // for the values stores using that key: + // + // // Package user defines a User type that's stored in Contexts. + // package user + // + // import "golang.org/x/net/context" + // + // // User is the type of value stored in the Contexts. + // type User struct {...} + // + // // key is an unexported type for keys defined in this package. + // // This prevents collisions with keys defined in other packages. + // type key int + // + // // userKey is the key for user.User values in Contexts. It is + // // unexported; clients use user.NewContext and user.FromContext + // // instead of using this key directly. + // var userKey key = 0 + // + // // NewContext returns a new Context that carries value u. + // func NewContext(ctx context.Context, u *User) context.Context { + // return context.WithValue(ctx, userKey, u) + // } + // + // // FromContext returns the User value stored in ctx, if any. + // func FromContext(ctx context.Context) (*User, bool) { + // u, ok := ctx.Value(userKey).(*User) + // return u, ok + // } + Value(key interface{}) interface{} +} + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc func() diff --git a/vendor/golang.org/x/net/webdav/file.go b/vendor/golang.org/x/net/webdav/file.go new file mode 100644 index 0000000..748118d --- /dev/null +++ b/vendor/golang.org/x/net/webdav/file.go @@ -0,0 +1,796 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "encoding/xml" + "io" + "net/http" + "os" + "path" + "path/filepath" + "strings" + "sync" + "time" + + "golang.org/x/net/context" +) + +// slashClean is equivalent to but slightly more efficient than +// path.Clean("/" + name). +func slashClean(name string) string { + if name == "" || name[0] != '/' { + name = "/" + name + } + return path.Clean(name) +} + +// A FileSystem implements access to a collection of named files. The elements +// in a file path are separated by slash ('/', U+002F) characters, regardless +// of host operating system convention. +// +// Each method has the same semantics as the os package's function of the same +// name. +// +// Note that the os.Rename documentation says that "OS-specific restrictions +// might apply". In particular, whether or not renaming a file or directory +// overwriting another existing file or directory is an error is OS-dependent. 
+type FileSystem interface { + Mkdir(ctx context.Context, name string, perm os.FileMode) error + OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) + RemoveAll(ctx context.Context, name string) error + Rename(ctx context.Context, oldName, newName string) error + Stat(ctx context.Context, name string) (os.FileInfo, error) +} + +// A File is returned by a FileSystem's OpenFile method and can be served by a +// Handler. +// +// A File may optionally implement the DeadPropsHolder interface, if it can +// load and save dead properties. +type File interface { + http.File + io.Writer +} + +// A Dir implements FileSystem using the native file system restricted to a +// specific directory tree. +// +// While the FileSystem.OpenFile method takes '/'-separated paths, a Dir's +// string value is a filename on the native file system, not a URL, so it is +// separated by filepath.Separator, which isn't necessarily '/'. +// +// An empty Dir is treated as ".". +type Dir string + +func (d Dir) resolve(name string) string { + // This implementation is based on Dir.Open's code in the standard net/http package. + if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 || + strings.Contains(name, "\x00") { + return "" + } + dir := string(d) + if dir == "" { + dir = "." + } + return filepath.Join(dir, filepath.FromSlash(slashClean(name))) +} + +func (d Dir) Mkdir(ctx context.Context, name string, perm os.FileMode) error { + if name = d.resolve(name); name == "" { + return os.ErrNotExist + } + return os.Mkdir(name, perm) +} + +func (d Dir) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) { + if name = d.resolve(name); name == "" { + return nil, os.ErrNotExist + } + f, err := os.OpenFile(name, flag, perm) + if err != nil { + return nil, err + } + return f, nil +} + +func (d Dir) RemoveAll(ctx context.Context, name string) error { + if name = d.resolve(name); name == "" { + return os.ErrNotExist + } + if name == filepath.Clean(string(d)) { + // Prohibit removing the virtual root directory. + return os.ErrInvalid + } + return os.RemoveAll(name) +} + +func (d Dir) Rename(ctx context.Context, oldName, newName string) error { + if oldName = d.resolve(oldName); oldName == "" { + return os.ErrNotExist + } + if newName = d.resolve(newName); newName == "" { + return os.ErrNotExist + } + if root := filepath.Clean(string(d)); root == oldName || root == newName { + // Prohibit renaming from or to the virtual root directory. + return os.ErrInvalid + } + return os.Rename(oldName, newName) +} + +func (d Dir) Stat(ctx context.Context, name string) (os.FileInfo, error) { + if name = d.resolve(name); name == "" { + return nil, os.ErrNotExist + } + return os.Stat(name) +} + +// NewMemFS returns a new in-memory FileSystem implementation. +func NewMemFS() FileSystem { + return &memFS{ + root: memFSNode{ + children: make(map[string]*memFSNode), + mode: 0660 | os.ModeDir, + modTime: time.Now(), + }, + } +} + +// A memFS implements FileSystem, storing all metadata and actual file data +// in-memory. No limits on filesystem size are used, so it is not recommended +// this be used where the clients are untrusted. +// +// Concurrent access is permitted. The tree structure is protected by a mutex, +// and each node's contents and metadata are protected by a per-node mutex. +// +// TODO: Enforce file permissions. +type memFS struct { + mu sync.Mutex + root memFSNode +} + +// TODO: clean up and rationalize the walk/find code. 
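The FileSystem interface and the in-memory implementation returned by NewMemFS above can be exercised directly, without any HTTP handler. The sketch below (not part of the vendored sources) creates a directory, writes a file, and stats it back through the same FileSystem; the "/notes/todo.txt" path and its contents are made-up illustration values.

package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/net/context"
	"golang.org/x/net/webdav"
)

func main() {
	ctx := context.Background()
	fs := webdav.NewMemFS()

	// Create a directory, then create and write a file beneath it.
	if err := fs.Mkdir(ctx, "/notes", 0755); err != nil {
		log.Fatal(err)
	}
	f, err := fs.OpenFile(ctx, "/notes/todo.txt", os.O_RDWR|os.O_CREATE, 0644)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := f.Write([]byte("hello webdav\n")); err != nil {
		log.Fatal(err)
	}
	if err := f.Close(); err != nil {
		log.Fatal(err)
	}

	// Stat the file back through the same FileSystem.
	fi, err := fs.Stat(ctx, "/notes/todo.txt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s is %d bytes\n", fi.Name(), fi.Size())
}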
+ +// walk walks the directory tree for the fullname, calling f at each step. If f +// returns an error, the walk will be aborted and return that same error. +// +// dir is the directory at that step, frag is the name fragment, and final is +// whether it is the final step. For example, walking "/foo/bar/x" will result +// in 3 calls to f: +// - "/", "foo", false +// - "/foo/", "bar", false +// - "/foo/bar/", "x", true +// The frag argument will be empty only if dir is the root node and the walk +// ends at that root node. +func (fs *memFS) walk(op, fullname string, f func(dir *memFSNode, frag string, final bool) error) error { + original := fullname + fullname = slashClean(fullname) + + // Strip any leading "/"s to make fullname a relative path, as the walk + // starts at fs.root. + if fullname[0] == '/' { + fullname = fullname[1:] + } + dir := &fs.root + + for { + frag, remaining := fullname, "" + i := strings.IndexRune(fullname, '/') + final := i < 0 + if !final { + frag, remaining = fullname[:i], fullname[i+1:] + } + if frag == "" && dir != &fs.root { + panic("webdav: empty path fragment for a clean path") + } + if err := f(dir, frag, final); err != nil { + return &os.PathError{ + Op: op, + Path: original, + Err: err, + } + } + if final { + break + } + child := dir.children[frag] + if child == nil { + return &os.PathError{ + Op: op, + Path: original, + Err: os.ErrNotExist, + } + } + if !child.mode.IsDir() { + return &os.PathError{ + Op: op, + Path: original, + Err: os.ErrInvalid, + } + } + dir, fullname = child, remaining + } + return nil +} + +// find returns the parent of the named node and the relative name fragment +// from the parent to the child. For example, if finding "/foo/bar/baz" then +// parent will be the node for "/foo/bar" and frag will be "baz". +// +// If the fullname names the root node, then parent, frag and err will be zero. +// +// find returns an error if the parent does not already exist or the parent +// isn't a directory, but it will not return an error per se if the child does +// not already exist. The error returned is either nil or an *os.PathError +// whose Op is op. +func (fs *memFS) find(op, fullname string) (parent *memFSNode, frag string, err error) { + err = fs.walk(op, fullname, func(parent0 *memFSNode, frag0 string, final bool) error { + if !final { + return nil + } + if frag0 != "" { + parent, frag = parent0, frag0 + } + return nil + }) + return parent, frag, err +} + +func (fs *memFS) Mkdir(ctx context.Context, name string, perm os.FileMode) error { + fs.mu.Lock() + defer fs.mu.Unlock() + + dir, frag, err := fs.find("mkdir", name) + if err != nil { + return err + } + if dir == nil { + // We can't create the root. + return os.ErrInvalid + } + if _, ok := dir.children[frag]; ok { + return os.ErrExist + } + dir.children[frag] = &memFSNode{ + children: make(map[string]*memFSNode), + mode: perm.Perm() | os.ModeDir, + modTime: time.Now(), + } + return nil +} + +func (fs *memFS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) { + fs.mu.Lock() + defer fs.mu.Unlock() + + dir, frag, err := fs.find("open", name) + if err != nil { + return nil, err + } + var n *memFSNode + if dir == nil { + // We're opening the root. + if flag&(os.O_WRONLY|os.O_RDWR) != 0 { + return nil, os.ErrPermission + } + n, frag = &fs.root, "/" + + } else { + n = dir.children[frag] + if flag&(os.O_SYNC|os.O_APPEND) != 0 { + // memFile doesn't support these flags yet. 
+ return nil, os.ErrInvalid + } + if flag&os.O_CREATE != 0 { + if flag&os.O_EXCL != 0 && n != nil { + return nil, os.ErrExist + } + if n == nil { + n = &memFSNode{ + mode: perm.Perm(), + } + dir.children[frag] = n + } + } + if n == nil { + return nil, os.ErrNotExist + } + if flag&(os.O_WRONLY|os.O_RDWR) != 0 && flag&os.O_TRUNC != 0 { + n.mu.Lock() + n.data = nil + n.mu.Unlock() + } + } + + children := make([]os.FileInfo, 0, len(n.children)) + for cName, c := range n.children { + children = append(children, c.stat(cName)) + } + return &memFile{ + n: n, + nameSnapshot: frag, + childrenSnapshot: children, + }, nil +} + +func (fs *memFS) RemoveAll(ctx context.Context, name string) error { + fs.mu.Lock() + defer fs.mu.Unlock() + + dir, frag, err := fs.find("remove", name) + if err != nil { + return err + } + if dir == nil { + // We can't remove the root. + return os.ErrInvalid + } + delete(dir.children, frag) + return nil +} + +func (fs *memFS) Rename(ctx context.Context, oldName, newName string) error { + fs.mu.Lock() + defer fs.mu.Unlock() + + oldName = slashClean(oldName) + newName = slashClean(newName) + if oldName == newName { + return nil + } + if strings.HasPrefix(newName, oldName+"/") { + // We can't rename oldName to be a sub-directory of itself. + return os.ErrInvalid + } + + oDir, oFrag, err := fs.find("rename", oldName) + if err != nil { + return err + } + if oDir == nil { + // We can't rename from the root. + return os.ErrInvalid + } + + nDir, nFrag, err := fs.find("rename", newName) + if err != nil { + return err + } + if nDir == nil { + // We can't rename to the root. + return os.ErrInvalid + } + + oNode, ok := oDir.children[oFrag] + if !ok { + return os.ErrNotExist + } + if oNode.children != nil { + if nNode, ok := nDir.children[nFrag]; ok { + if nNode.children == nil { + return errNotADirectory + } + if len(nNode.children) != 0 { + return errDirectoryNotEmpty + } + } + } + delete(oDir.children, oFrag) + nDir.children[nFrag] = oNode + return nil +} + +func (fs *memFS) Stat(ctx context.Context, name string) (os.FileInfo, error) { + fs.mu.Lock() + defer fs.mu.Unlock() + + dir, frag, err := fs.find("stat", name) + if err != nil { + return nil, err + } + if dir == nil { + // We're stat'ting the root. + return fs.root.stat("/"), nil + } + if n, ok := dir.children[frag]; ok { + return n.stat(path.Base(name)), nil + } + return nil, os.ErrNotExist +} + +// A memFSNode represents a single entry in the in-memory filesystem and also +// implements os.FileInfo. +type memFSNode struct { + // children is protected by memFS.mu. 
+ children map[string]*memFSNode + + mu sync.Mutex + data []byte + mode os.FileMode + modTime time.Time + deadProps map[xml.Name]Property +} + +func (n *memFSNode) stat(name string) *memFileInfo { + n.mu.Lock() + defer n.mu.Unlock() + return &memFileInfo{ + name: name, + size: int64(len(n.data)), + mode: n.mode, + modTime: n.modTime, + } +} + +func (n *memFSNode) DeadProps() (map[xml.Name]Property, error) { + n.mu.Lock() + defer n.mu.Unlock() + if len(n.deadProps) == 0 { + return nil, nil + } + ret := make(map[xml.Name]Property, len(n.deadProps)) + for k, v := range n.deadProps { + ret[k] = v + } + return ret, nil +} + +func (n *memFSNode) Patch(patches []Proppatch) ([]Propstat, error) { + n.mu.Lock() + defer n.mu.Unlock() + pstat := Propstat{Status: http.StatusOK} + for _, patch := range patches { + for _, p := range patch.Props { + pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName}) + if patch.Remove { + delete(n.deadProps, p.XMLName) + continue + } + if n.deadProps == nil { + n.deadProps = map[xml.Name]Property{} + } + n.deadProps[p.XMLName] = p + } + } + return []Propstat{pstat}, nil +} + +type memFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (f *memFileInfo) Name() string { return f.name } +func (f *memFileInfo) Size() int64 { return f.size } +func (f *memFileInfo) Mode() os.FileMode { return f.mode } +func (f *memFileInfo) ModTime() time.Time { return f.modTime } +func (f *memFileInfo) IsDir() bool { return f.mode.IsDir() } +func (f *memFileInfo) Sys() interface{} { return nil } + +// A memFile is a File implementation for a memFSNode. It is a per-file (not +// per-node) read/write position, and a snapshot of the memFS' tree structure +// (a node's name and children) for that node. +type memFile struct { + n *memFSNode + nameSnapshot string + childrenSnapshot []os.FileInfo + // pos is protected by n.mu. + pos int +} + +// A *memFile implements the optional DeadPropsHolder interface. +var _ DeadPropsHolder = (*memFile)(nil) + +func (f *memFile) DeadProps() (map[xml.Name]Property, error) { return f.n.DeadProps() } +func (f *memFile) Patch(patches []Proppatch) ([]Propstat, error) { return f.n.Patch(patches) } + +func (f *memFile) Close() error { + return nil +} + +func (f *memFile) Read(p []byte) (int, error) { + f.n.mu.Lock() + defer f.n.mu.Unlock() + if f.n.mode.IsDir() { + return 0, os.ErrInvalid + } + if f.pos >= len(f.n.data) { + return 0, io.EOF + } + n := copy(p, f.n.data[f.pos:]) + f.pos += n + return n, nil +} + +func (f *memFile) Readdir(count int) ([]os.FileInfo, error) { + f.n.mu.Lock() + defer f.n.mu.Unlock() + if !f.n.mode.IsDir() { + return nil, os.ErrInvalid + } + old := f.pos + if old >= len(f.childrenSnapshot) { + // The os.File Readdir docs say that at the end of a directory, + // the error is io.EOF if count > 0 and nil if count <= 0. + if count > 0 { + return nil, io.EOF + } + return nil, nil + } + if count > 0 { + f.pos += count + if f.pos > len(f.childrenSnapshot) { + f.pos = len(f.childrenSnapshot) + } + } else { + f.pos = len(f.childrenSnapshot) + old = 0 + } + return f.childrenSnapshot[old:f.pos], nil +} + +func (f *memFile) Seek(offset int64, whence int) (int64, error) { + f.n.mu.Lock() + defer f.n.mu.Unlock() + npos := f.pos + // TODO: How to handle offsets greater than the size of system int? 
+ switch whence { + case os.SEEK_SET: + npos = int(offset) + case os.SEEK_CUR: + npos += int(offset) + case os.SEEK_END: + npos = len(f.n.data) + int(offset) + default: + npos = -1 + } + if npos < 0 { + return 0, os.ErrInvalid + } + f.pos = npos + return int64(f.pos), nil +} + +func (f *memFile) Stat() (os.FileInfo, error) { + return f.n.stat(f.nameSnapshot), nil +} + +func (f *memFile) Write(p []byte) (int, error) { + lenp := len(p) + f.n.mu.Lock() + defer f.n.mu.Unlock() + + if f.n.mode.IsDir() { + return 0, os.ErrInvalid + } + if f.pos < len(f.n.data) { + n := copy(f.n.data[f.pos:], p) + f.pos += n + p = p[n:] + } else if f.pos > len(f.n.data) { + // Write permits the creation of holes, if we've seek'ed past the + // existing end of file. + if f.pos <= cap(f.n.data) { + oldLen := len(f.n.data) + f.n.data = f.n.data[:f.pos] + hole := f.n.data[oldLen:] + for i := range hole { + hole[i] = 0 + } + } else { + d := make([]byte, f.pos, f.pos+len(p)) + copy(d, f.n.data) + f.n.data = d + } + } + + if len(p) > 0 { + // We should only get here if f.pos == len(f.n.data). + f.n.data = append(f.n.data, p...) + f.pos = len(f.n.data) + } + f.n.modTime = time.Now() + return lenp, nil +} + +// moveFiles moves files and/or directories from src to dst. +// +// See section 9.9.4 for when various HTTP status codes apply. +func moveFiles(ctx context.Context, fs FileSystem, src, dst string, overwrite bool) (status int, err error) { + created := false + if _, err := fs.Stat(ctx, dst); err != nil { + if !os.IsNotExist(err) { + return http.StatusForbidden, err + } + created = true + } else if overwrite { + // Section 9.9.3 says that "If a resource exists at the destination + // and the Overwrite header is "T", then prior to performing the move, + // the server must perform a DELETE with "Depth: infinity" on the + // destination resource. + if err := fs.RemoveAll(ctx, dst); err != nil { + return http.StatusForbidden, err + } + } else { + return http.StatusPreconditionFailed, os.ErrExist + } + if err := fs.Rename(ctx, src, dst); err != nil { + return http.StatusForbidden, err + } + if created { + return http.StatusCreated, nil + } + return http.StatusNoContent, nil +} + +func copyProps(dst, src File) error { + d, ok := dst.(DeadPropsHolder) + if !ok { + return nil + } + s, ok := src.(DeadPropsHolder) + if !ok { + return nil + } + m, err := s.DeadProps() + if err != nil { + return err + } + props := make([]Property, 0, len(m)) + for _, prop := range m { + props = append(props, prop) + } + _, err = d.Patch([]Proppatch{{Props: props}}) + return err +} + +// copyFiles copies files and/or directories from src to dst. +// +// See section 9.8.5 for when various HTTP status codes apply. +func copyFiles(ctx context.Context, fs FileSystem, src, dst string, overwrite bool, depth int, recursion int) (status int, err error) { + if recursion == 1000 { + return http.StatusInternalServerError, errRecursionTooDeep + } + recursion++ + + // TODO: section 9.8.3 says that "Note that an infinite-depth COPY of /A/ + // into /A/B/ could lead to infinite recursion if not handled correctly." 
+ + srcFile, err := fs.OpenFile(ctx, src, os.O_RDONLY, 0) + if err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusInternalServerError, err + } + defer srcFile.Close() + srcStat, err := srcFile.Stat() + if err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusInternalServerError, err + } + srcPerm := srcStat.Mode() & os.ModePerm + + created := false + if _, err := fs.Stat(ctx, dst); err != nil { + if os.IsNotExist(err) { + created = true + } else { + return http.StatusForbidden, err + } + } else { + if !overwrite { + return http.StatusPreconditionFailed, os.ErrExist + } + if err := fs.RemoveAll(ctx, dst); err != nil && !os.IsNotExist(err) { + return http.StatusForbidden, err + } + } + + if srcStat.IsDir() { + if err := fs.Mkdir(ctx, dst, srcPerm); err != nil { + return http.StatusForbidden, err + } + if depth == infiniteDepth { + children, err := srcFile.Readdir(-1) + if err != nil { + return http.StatusForbidden, err + } + for _, c := range children { + name := c.Name() + s := path.Join(src, name) + d := path.Join(dst, name) + cStatus, cErr := copyFiles(ctx, fs, s, d, overwrite, depth, recursion) + if cErr != nil { + // TODO: MultiStatus. + return cStatus, cErr + } + } + } + + } else { + dstFile, err := fs.OpenFile(ctx, dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, srcPerm) + if err != nil { + if os.IsNotExist(err) { + return http.StatusConflict, err + } + return http.StatusForbidden, err + + } + _, copyErr := io.Copy(dstFile, srcFile) + propsErr := copyProps(dstFile, srcFile) + closeErr := dstFile.Close() + if copyErr != nil { + return http.StatusInternalServerError, copyErr + } + if propsErr != nil { + return http.StatusInternalServerError, propsErr + } + if closeErr != nil { + return http.StatusInternalServerError, closeErr + } + } + + if created { + return http.StatusCreated, nil + } + return http.StatusNoContent, nil +} + +// walkFS traverses filesystem fs starting at name up to depth levels. +// +// Allowed values for depth are 0, 1 or infiniteDepth. For each visited node, +// walkFS calls walkFn. If a visited file system node is a directory and +// walkFn returns filepath.SkipDir, walkFS will skip traversal of this node. +func walkFS(ctx context.Context, fs FileSystem, depth int, name string, info os.FileInfo, walkFn filepath.WalkFunc) error { + // This implementation is based on Walk's code in the standard path/filepath package. + err := walkFn(name, info, nil) + if err != nil { + if info.IsDir() && err == filepath.SkipDir { + return nil + } + return err + } + if !info.IsDir() || depth == 0 { + return nil + } + if depth == 1 { + depth = 0 + } + + // Read directory names. 
+ f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0) + if err != nil { + return walkFn(name, info, err) + } + fileInfos, err := f.Readdir(0) + f.Close() + if err != nil { + return walkFn(name, info, err) + } + + for _, fileInfo := range fileInfos { + filename := path.Join(name, fileInfo.Name()) + fileInfo, err := fs.Stat(ctx, filename) + if err != nil { + if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { + return err + } + } else { + err = walkFS(ctx, fs, depth, filename, fileInfo, walkFn) + if err != nil { + if !fileInfo.IsDir() || err != filepath.SkipDir { + return err + } + } + } + } + return nil +} diff --git a/vendor/golang.org/x/net/webdav/file_go1.6.go b/vendor/golang.org/x/net/webdav/file_go1.6.go new file mode 100644 index 0000000..fa38770 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/file_go1.6.go @@ -0,0 +1,17 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package webdav + +import ( + "net/http" + + "golang.org/x/net/context" +) + +func getContext(r *http.Request) context.Context { + return context.Background() +} diff --git a/vendor/golang.org/x/net/webdav/file_go1.7.go b/vendor/golang.org/x/net/webdav/file_go1.7.go new file mode 100644 index 0000000..d1c3de8 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/file_go1.7.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package webdav + +import ( + "context" + "net/http" +) + +func getContext(r *http.Request) context.Context { + return r.Context() +} diff --git a/vendor/golang.org/x/net/webdav/if.go b/vendor/golang.org/x/net/webdav/if.go new file mode 100644 index 0000000..416e81c --- /dev/null +++ b/vendor/golang.org/x/net/webdav/if.go @@ -0,0 +1,173 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +// The If header is covered by Section 10.4. +// http://www.webdav.org/specs/rfc4918.html#HEADER_If + +import ( + "strings" +) + +// ifHeader is a disjunction (OR) of ifLists. +type ifHeader struct { + lists []ifList +} + +// ifList is a conjunction (AND) of Conditions, and an optional resource tag. +type ifList struct { + resourceTag string + conditions []Condition +} + +// parseIfHeader parses the "If: foo bar" HTTP header. The httpHeader string +// should omit the "If:" prefix and have any "\r\n"s collapsed to a " ", as is +// returned by req.Header.Get("If") for a http.Request req. 
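+//
+// As a hedged illustration (an addition, not part of the original comment),
+// a tagged-list value such as
+//
+//	</locked/a> (<opaquelocktoken:abc-123> ["etag-1"])
+//
+// would parse into a single ifList whose resourceTag is the text between the
+// first pair of angle brackets and whose conditions hold one lock token and
+// one ETag.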
+func parseIfHeader(httpHeader string) (h ifHeader, ok bool) { + s := strings.TrimSpace(httpHeader) + switch tokenType, _, _ := lex(s); tokenType { + case '(': + return parseNoTagLists(s) + case angleTokenType: + return parseTaggedLists(s) + default: + return ifHeader{}, false + } +} + +func parseNoTagLists(s string) (h ifHeader, ok bool) { + for { + l, remaining, ok := parseList(s) + if !ok { + return ifHeader{}, false + } + h.lists = append(h.lists, l) + if remaining == "" { + return h, true + } + s = remaining + } +} + +func parseTaggedLists(s string) (h ifHeader, ok bool) { + resourceTag, n := "", 0 + for first := true; ; first = false { + tokenType, tokenStr, remaining := lex(s) + switch tokenType { + case angleTokenType: + if !first && n == 0 { + return ifHeader{}, false + } + resourceTag, n = tokenStr, 0 + s = remaining + case '(': + n++ + l, remaining, ok := parseList(s) + if !ok { + return ifHeader{}, false + } + l.resourceTag = resourceTag + h.lists = append(h.lists, l) + if remaining == "" { + return h, true + } + s = remaining + default: + return ifHeader{}, false + } + } +} + +func parseList(s string) (l ifList, remaining string, ok bool) { + tokenType, _, s := lex(s) + if tokenType != '(' { + return ifList{}, "", false + } + for { + tokenType, _, remaining = lex(s) + if tokenType == ')' { + if len(l.conditions) == 0 { + return ifList{}, "", false + } + return l, remaining, true + } + c, remaining, ok := parseCondition(s) + if !ok { + return ifList{}, "", false + } + l.conditions = append(l.conditions, c) + s = remaining + } +} + +func parseCondition(s string) (c Condition, remaining string, ok bool) { + tokenType, tokenStr, s := lex(s) + if tokenType == notTokenType { + c.Not = true + tokenType, tokenStr, s = lex(s) + } + switch tokenType { + case strTokenType, angleTokenType: + c.Token = tokenStr + case squareTokenType: + c.ETag = tokenStr + default: + return Condition{}, "", false + } + return c, s, true +} + +// Single-rune tokens like '(' or ')' have a token type equal to their rune. +// All other tokens have a negative token type. +const ( + errTokenType = rune(-1) + eofTokenType = rune(-2) + strTokenType = rune(-3) + notTokenType = rune(-4) + angleTokenType = rune(-5) + squareTokenType = rune(-6) +) + +func lex(s string) (tokenType rune, tokenStr string, remaining string) { + // The net/textproto Reader that parses the HTTP header will collapse + // Linear White Space that spans multiple "\r\n" lines to a single " ", + // so we don't need to look for '\r' or '\n'. 
+	for len(s) > 0 && (s[0] == '\t' || s[0] == ' ') {
+		s = s[1:]
+	}
+	if len(s) == 0 {
+		return eofTokenType, "", ""
+	}
+	i := 0
+loop:
+	for ; i < len(s); i++ {
+		switch s[i] {
+		case '\t', ' ', '(', ')', '<', '>', '[', ']':
+			break loop
+		}
+	}
+
+	if i != 0 {
+		tokenStr, remaining = s[:i], s[i:]
+		if tokenStr == "Not" {
+			return notTokenType, "", remaining
+		}
+		return strTokenType, tokenStr, remaining
+	}
+
+	j := 0
+	switch s[0] {
+	case '<':
+		j, tokenType = strings.IndexByte(s, '>'), angleTokenType
+	case '[':
+		j, tokenType = strings.IndexByte(s, ']'), squareTokenType
+	default:
+		return rune(s[0]), "", s[1:]
+	}
+	if j < 0 {
+		return errTokenType, "", ""
+	}
+	return tokenType, s[1:j], s[j+1:]
+}
diff --git a/vendor/golang.org/x/net/webdav/internal/xml/README b/vendor/golang.org/x/net/webdav/internal/xml/README
new file mode 100644
index 0000000..89656f4
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/internal/xml/README
@@ -0,0 +1,11 @@
+This is a fork of the encoding/xml package at ca1d6c4, the last commit before
+https://go.googlesource.com/go/+/c0d6d33 "encoding/xml: restore Go 1.4 name
+space behavior" made late in the lead-up to the Go 1.5 release.
+
+The list of encoding/xml changes is at
+https://go.googlesource.com/go/+log/master/src/encoding/xml
+
+This fork is temporary, and I (nigeltao) expect to revert it after Go 1.6 is
+released.
+
+See http://golang.org/issue/11841
diff --git a/vendor/golang.org/x/net/webdav/internal/xml/marshal.go b/vendor/golang.org/x/net/webdav/internal/xml/marshal.go
new file mode 100644
index 0000000..cb82ec2
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/internal/xml/marshal.go
@@ -0,0 +1,1223 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xml
+
+import (
+	"bufio"
+	"bytes"
+	"encoding"
+	"fmt"
+	"io"
+	"reflect"
+	"strconv"
+	"strings"
+)
+
+const (
+	// A generic XML header suitable for use with the output of Marshal.
+	// This is not automatically added to any output of this package,
+	// it is provided as a convenience.
+	Header = `<?xml version="1.0" encoding="UTF-8"?>` + "\n"
+)
+
+// Marshal returns the XML encoding of v.
+//
+// Marshal handles an array or slice by marshalling each of the elements.
+// Marshal handles a pointer by marshalling the value it points at or, if the
+// pointer is nil, by writing nothing. Marshal handles an interface value by
+// marshalling the value it contains or, if the interface value is nil, by
+// writing nothing. Marshal handles all other data by writing one or more XML
+// elements containing the data.
+//
+// The name for the XML elements is taken from, in order of preference:
+// - the tag on the XMLName field, if the data is a struct
+// - the value of the XMLName field of type xml.Name
+// - the tag of the struct field used to obtain the data
+// - the name of the struct field used to obtain the data
+// - the name of the marshalled type
+//
+// The XML element for a struct contains marshalled elements for each of the
+// exported fields of the struct, with these exceptions:
+// - the XMLName field, described above, is omitted.
+// - a field with tag "-" is omitted.
+// - a field with tag "name,attr" becomes an attribute with
+// the given name in the XML element.
+// - a field with tag ",attr" becomes an attribute with the
+// field name in the XML element.
+// - a field with tag ",chardata" is written as character data,
+// not as an XML element.
+// - a field with tag ",innerxml" is written verbatim, not subject +// to the usual marshalling procedure. +// - a field with tag ",comment" is written as an XML comment, not +// subject to the usual marshalling procedure. It must not contain +// the "--" string within it. +// - a field with a tag including the "omitempty" option is omitted +// if the field value is empty. The empty values are false, 0, any +// nil pointer or interface value, and any array, slice, map, or +// string of length zero. +// - an anonymous struct field is handled as if the fields of its +// value were part of the outer struct. +// +// If a field uses a tag "a>b>c", then the element c will be nested inside +// parent elements a and b. Fields that appear next to each other that name +// the same parent will be enclosed in one XML element. +// +// See MarshalIndent for an example. +// +// Marshal will return an error if asked to marshal a channel, function, or map. +func Marshal(v interface{}) ([]byte, error) { + var b bytes.Buffer + if err := NewEncoder(&b).Encode(v); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// Marshaler is the interface implemented by objects that can marshal +// themselves into valid XML elements. +// +// MarshalXML encodes the receiver as zero or more XML elements. +// By convention, arrays or slices are typically encoded as a sequence +// of elements, one per entry. +// Using start as the element tag is not required, but doing so +// will enable Unmarshal to match the XML elements to the correct +// struct field. +// One common implementation strategy is to construct a separate +// value with a layout corresponding to the desired XML and then +// to encode it using e.EncodeElement. +// Another common strategy is to use repeated calls to e.EncodeToken +// to generate the XML output one token at a time. +// The sequence of encoded tokens must make up zero or more valid +// XML elements. +type Marshaler interface { + MarshalXML(e *Encoder, start StartElement) error +} + +// MarshalerAttr is the interface implemented by objects that can marshal +// themselves into valid XML attributes. +// +// MarshalXMLAttr returns an XML attribute with the encoded value of the receiver. +// Using name as the attribute name is not required, but doing so +// will enable Unmarshal to match the attribute to the correct +// struct field. +// If MarshalXMLAttr returns the zero attribute Attr{}, no attribute +// will be generated in the output. +// MarshalXMLAttr is used only for struct fields with the +// "attr" option in the field tag. +type MarshalerAttr interface { + MarshalXMLAttr(name Name) (Attr, error) +} + +// MarshalIndent works like Marshal, but each XML element begins on a new +// indented line that starts with prefix and is followed by one or more +// copies of indent according to the nesting depth. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + var b bytes.Buffer + enc := NewEncoder(&b) + enc.Indent(prefix, indent) + if err := enc.Encode(v); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// An Encoder writes XML data to an output stream. +type Encoder struct { + p printer +} + +// NewEncoder returns a new encoder that writes to w. 
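+//
+// A minimal usage sketch (an illustrative addition; os.Stdout and value are
+// placeholders):
+//
+//	enc := NewEncoder(os.Stdout)
+//	enc.Indent("", "  ")
+//	if err := enc.Encode(value); err != nil {
+//		// handle error
+//	}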
+func NewEncoder(w io.Writer) *Encoder {
+	e := &Encoder{printer{Writer: bufio.NewWriter(w)}}
+	e.p.encoder = e
+	return e
+}
+
+// Indent sets the encoder to generate XML in which each element
+// begins on a new indented line that starts with prefix and is followed by
+// one or more copies of indent according to the nesting depth.
+func (enc *Encoder) Indent(prefix, indent string) {
+	enc.p.prefix = prefix
+	enc.p.indent = indent
+}
+
+// Encode writes the XML encoding of v to the stream.
+//
+// See the documentation for Marshal for details about the conversion
+// of Go values to XML.
+//
+// Encode calls Flush before returning.
+func (enc *Encoder) Encode(v interface{}) error {
+	err := enc.p.marshalValue(reflect.ValueOf(v), nil, nil)
+	if err != nil {
+		return err
+	}
+	return enc.p.Flush()
+}
+
+// EncodeElement writes the XML encoding of v to the stream,
+// using start as the outermost tag in the encoding.
+//
+// See the documentation for Marshal for details about the conversion
+// of Go values to XML.
+//
+// EncodeElement calls Flush before returning.
+func (enc *Encoder) EncodeElement(v interface{}, start StartElement) error {
+	err := enc.p.marshalValue(reflect.ValueOf(v), nil, &start)
+	if err != nil {
+		return err
+	}
+	return enc.p.Flush()
+}
+
+var (
+	begComment   = []byte("<!--")
+	endComment   = []byte("-->")
+	endProcInst  = []byte("?>")
+	endDirective = []byte(">")
+)
+
+// EncodeToken writes the given XML token to the stream.
+// It returns an error if StartElement and EndElement tokens are not
+// properly matched.
+//
+// EncodeToken does not call Flush, because usually it is part of a
+// larger operation such as Encode or EncodeElement (or a custom
+// Marshaler's MarshalXML invoked during those), and those will call
+// Flush when finished. Callers that create an Encoder and then invoke
+// EncodeToken directly, without using Encode or EncodeElement, need to
+// call Flush when finished to ensure that the XML is written to the
+// underlying writer.
+//
+// EncodeToken allows writing a ProcInst with Target set to "xml" only
+// as the first token in the stream.
+//
+// When encoding a StartElement holding an XML namespace prefix
+// declaration for a prefix that is not already declared, contained
+// elements (including the StartElement itself) will use the declared
+// prefix when encoding names with matching namespace URIs.
+func (enc *Encoder) EncodeToken(t Token) error {
+
+	p := &enc.p
+	switch t := t.(type) {
+	case StartElement:
+		if err := p.writeStart(&t); err != nil {
+			return err
+		}
+	case EndElement:
+		if err := p.writeEnd(t.Name); err != nil {
+			return err
+		}
+	case CharData:
+		escapeText(p, t, false)
+	case Comment:
+		if bytes.Contains(t, endComment) {
+			return fmt.Errorf("xml: EncodeToken of Comment containing --> marker")
+		}
+		p.WriteString("<!--")
+		p.Write(t)
+		p.WriteString("-->")
+		return p.cachedWriteError()
+	case ProcInst:
+		// First token to be encoded which is also a ProcInst with target of xml
+		// is the xml declaration. The only ProcInst where target of xml is allowed.
+		if t.Target == "xml" && p.Buffered() != 0 {
+			return fmt.Errorf("xml: EncodeToken of ProcInst xml target only valid for xml declaration, first token encoded")
+		}
+		if !isNameString(t.Target) {
+			return fmt.Errorf("xml: EncodeToken of ProcInst with invalid Target")
+		}
+		if bytes.Contains(t.Inst, endProcInst) {
+			return fmt.Errorf("xml: EncodeToken of ProcInst containing ?> marker")
+		}
+		p.WriteString("<?")
+		p.WriteString(t.Target)
+		if len(t.Inst) > 0 {
+			p.WriteByte(' ')
+			p.Write(t.Inst)
+		}
+		p.WriteString("?>")
+	case Directive:
+		if !isValidDirective(t) {
+			return fmt.Errorf("xml: EncodeToken of Directive containing wrong < or > markers")
+		}
+		p.WriteString("<!")
+		p.Write(t)
+		p.WriteString(">")
+	default:
+		return fmt.Errorf("xml: EncodeToken of invalid token type")
+
+	}
+	return p.cachedWriteError()
+}
+
+// isValidDirective reports whether dir is a valid directive text,
+// meaning angle brackets are matched, ignoring comments and strings.
+func isValidDirective(dir Directive) bool {
+	var (
+		depth     int
+		inquote   uint8
+		incomment bool
+	)
+	for i, c := range dir {
+		switch {
+		case incomment:
+			if c == '>' {
+				if n := 1 + i - len(endComment); n >= 0 && bytes.Equal(dir[n:i+1], endComment) {
+					incomment = false
+				}
+			}
+			// Just ignore anything in comment
+		case inquote != 0:
+			if c == inquote {
+				inquote = 0
+			}
+			// Just ignore anything within quotes
+		case c == '\'' || c == '"':
+			inquote = c
+		case c == '<':
+			if i+len(begComment) < len(dir) && bytes.Equal(dir[i:i+len(begComment)], begComment) {
+				incomment = true
+			} else {
+				depth++
+			}
+		case c == '>':
+			if depth == 0 {
+				return false
+			}
+			depth--
+		}
+	}
+	return depth == 0 && inquote == 0 && !incomment
+}
+
+// Flush flushes any buffered XML to the underlying writer.
+// See the EncodeToken documentation for details about when it is necessary.
+func (enc *Encoder) Flush() error {
+	return enc.p.Flush()
+}
+
+type printer struct {
+	*bufio.Writer
+	encoder    *Encoder
+	seq        int
+	indent     string
+	prefix     string
+	depth      int
+	indentedIn bool
+	putNewline bool
+	defaultNS  string
+	attrNS     map[string]string // map prefix -> name space
+	attrPrefix map[string]string // map name space -> prefix
+	prefixes   []printerPrefix
+	tags       []Name
+}
+
+// printerPrefix holds a namespace undo record.
+// When an element is popped, the prefix record
+// is set back to the recorded URL. The empty
+// prefix records the URL for the default name space.
+//
+// The start of an element is recorded with an element
+// that has mark=true.
+type printerPrefix struct {
+	prefix string
+	url    string
+	mark   bool
+}
+
+func (p *printer) prefixForNS(url string, isAttr bool) string {
+	// The "http://www.w3.org/XML/1998/namespace" name space is predefined as "xml"
+	// and must be referred to that way.
+	// (The "http://www.w3.org/2000/xmlns/" name space is also predefined as "xmlns",
+	// but users should not be trying to use that one directly - that's our job.)
+	if url == xmlURL {
+		return "xml"
+	}
+	if !isAttr && url == p.defaultNS {
+		// We can use the default name space.
+		return ""
+	}
+	return p.attrPrefix[url]
+}
+
+// defineNS pushes any namespace definition found in the given attribute.
+// If ignoreNonEmptyDefault is true, an xmlns="nonempty"
+// attribute will be ignored.
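+//
+// For example (a hedged illustration, not part of the original comment), an
+// attribute written as xmlns:foo="http://example.com/ns" pushes the prefix
+// "foo" for that URL, while a bare xmlns="..." attribute only adjusts the
+// default name space and defines no prefix.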
+func (p *printer) defineNS(attr Attr, ignoreNonEmptyDefault bool) error { + var prefix string + if attr.Name.Local == "xmlns" { + if attr.Name.Space != "" && attr.Name.Space != "xml" && attr.Name.Space != xmlURL { + return fmt.Errorf("xml: cannot redefine xmlns attribute prefix") + } + } else if attr.Name.Space == "xmlns" && attr.Name.Local != "" { + prefix = attr.Name.Local + if attr.Value == "" { + // Technically, an empty XML namespace is allowed for an attribute. + // From http://www.w3.org/TR/xml-names11/#scoping-defaulting: + // + // The attribute value in a namespace declaration for a prefix may be + // empty. This has the effect, within the scope of the declaration, of removing + // any association of the prefix with a namespace name. + // + // However our namespace prefixes here are used only as hints. There's + // no need to respect the removal of a namespace prefix, so we ignore it. + return nil + } + } else { + // Ignore: it's not a namespace definition + return nil + } + if prefix == "" { + if attr.Value == p.defaultNS { + // No need for redefinition. + return nil + } + if attr.Value != "" && ignoreNonEmptyDefault { + // We have an xmlns="..." value but + // it can't define a name space in this context, + // probably because the element has an empty + // name space. In this case, we just ignore + // the name space declaration. + return nil + } + } else if _, ok := p.attrPrefix[attr.Value]; ok { + // There's already a prefix for the given name space, + // so use that. This prevents us from + // having two prefixes for the same name space + // so attrNS and attrPrefix can remain bijective. + return nil + } + p.pushPrefix(prefix, attr.Value) + return nil +} + +// createNSPrefix creates a name space prefix attribute +// to use for the given name space, defining a new prefix +// if necessary. +// If isAttr is true, the prefix is to be created for an attribute +// prefix, which means that the default name space cannot +// be used. +func (p *printer) createNSPrefix(url string, isAttr bool) { + if _, ok := p.attrPrefix[url]; ok { + // We already have a prefix for the given URL. + return + } + switch { + case !isAttr && url == p.defaultNS: + // We can use the default name space. + return + case url == "": + // The only way we can encode names in the empty + // name space is by using the default name space, + // so we must use that. + if p.defaultNS != "" { + // The default namespace is non-empty, so we + // need to set it to empty. + p.pushPrefix("", "") + } + return + case url == xmlURL: + return + } + // TODO If the URL is an existing prefix, we could + // use it as is. That would enable the + // marshaling of elements that had been unmarshaled + // and with a name space prefix that was not found. + // although technically it would be incorrect. + + // Pick a name. We try to use the final element of the path + // but fall back to _. + prefix := strings.TrimRight(url, "/") + if i := strings.LastIndex(prefix, "/"); i >= 0 { + prefix = prefix[i+1:] + } + if prefix == "" || !isName([]byte(prefix)) || strings.Contains(prefix, ":") { + prefix = "_" + } + if strings.HasPrefix(prefix, "xml") { + // xmlanything is reserved. + prefix = "_" + prefix + } + if p.attrNS[prefix] != "" { + // Name is taken. Find a better one. 
+ for p.seq++; ; p.seq++ { + if id := prefix + "_" + strconv.Itoa(p.seq); p.attrNS[id] == "" { + prefix = id + break + } + } + } + + p.pushPrefix(prefix, url) +} + +// writeNamespaces writes xmlns attributes for all the +// namespace prefixes that have been defined in +// the current element. +func (p *printer) writeNamespaces() { + for i := len(p.prefixes) - 1; i >= 0; i-- { + prefix := p.prefixes[i] + if prefix.mark { + return + } + p.WriteString(" ") + if prefix.prefix == "" { + // Default name space. + p.WriteString(`xmlns="`) + } else { + p.WriteString("xmlns:") + p.WriteString(prefix.prefix) + p.WriteString(`="`) + } + EscapeText(p, []byte(p.nsForPrefix(prefix.prefix))) + p.WriteString(`"`) + } +} + +// pushPrefix pushes a new prefix on the prefix stack +// without checking to see if it is already defined. +func (p *printer) pushPrefix(prefix, url string) { + p.prefixes = append(p.prefixes, printerPrefix{ + prefix: prefix, + url: p.nsForPrefix(prefix), + }) + p.setAttrPrefix(prefix, url) +} + +// nsForPrefix returns the name space for the given +// prefix. Note that this is not valid for the +// empty attribute prefix, which always has an empty +// name space. +func (p *printer) nsForPrefix(prefix string) string { + if prefix == "" { + return p.defaultNS + } + return p.attrNS[prefix] +} + +// markPrefix marks the start of an element on the prefix +// stack. +func (p *printer) markPrefix() { + p.prefixes = append(p.prefixes, printerPrefix{ + mark: true, + }) +} + +// popPrefix pops all defined prefixes for the current +// element. +func (p *printer) popPrefix() { + for len(p.prefixes) > 0 { + prefix := p.prefixes[len(p.prefixes)-1] + p.prefixes = p.prefixes[:len(p.prefixes)-1] + if prefix.mark { + break + } + p.setAttrPrefix(prefix.prefix, prefix.url) + } +} + +// setAttrPrefix sets an attribute name space prefix. +// If url is empty, the attribute is removed. +// If prefix is empty, the default name space is set. +func (p *printer) setAttrPrefix(prefix, url string) { + if prefix == "" { + p.defaultNS = url + return + } + if url == "" { + delete(p.attrPrefix, p.attrNS[prefix]) + delete(p.attrNS, prefix) + return + } + if p.attrPrefix == nil { + // Need to define a new name space. + p.attrPrefix = make(map[string]string) + p.attrNS = make(map[string]string) + } + // Remove any old prefix value. This is OK because we maintain a + // strict one-to-one mapping between prefix and URL (see + // defineNS) + delete(p.attrPrefix, p.attrNS[prefix]) + p.attrPrefix[url] = prefix + p.attrNS[prefix] = url +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + marshalerAttrType = reflect.TypeOf((*MarshalerAttr)(nil)).Elem() + textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() +) + +// marshalValue writes one or more XML elements representing val. +// If val was obtained from a struct field, finfo must have its details. +func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplate *StartElement) error { + if startTemplate != nil && startTemplate.Name.Local == "" { + return fmt.Errorf("xml: EncodeElement of StartElement with missing name") + } + + if !val.IsValid() { + return nil + } + if finfo != nil && finfo.flags&fOmitEmpty != 0 && isEmptyValue(val) { + return nil + } + + // Drill into interfaces and pointers. + // This can turn into an infinite loop given a cyclic chain, + // but it matches the Go 1 behavior. 
+ for val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr { + if val.IsNil() { + return nil + } + val = val.Elem() + } + + kind := val.Kind() + typ := val.Type() + + // Check for marshaler. + if val.CanInterface() && typ.Implements(marshalerType) { + return p.marshalInterface(val.Interface().(Marshaler), p.defaultStart(typ, finfo, startTemplate)) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(marshalerType) { + return p.marshalInterface(pv.Interface().(Marshaler), p.defaultStart(pv.Type(), finfo, startTemplate)) + } + } + + // Check for text marshaler. + if val.CanInterface() && typ.Implements(textMarshalerType) { + return p.marshalTextInterface(val.Interface().(encoding.TextMarshaler), p.defaultStart(typ, finfo, startTemplate)) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { + return p.marshalTextInterface(pv.Interface().(encoding.TextMarshaler), p.defaultStart(pv.Type(), finfo, startTemplate)) + } + } + + // Slices and arrays iterate over the elements. They do not have an enclosing tag. + if (kind == reflect.Slice || kind == reflect.Array) && typ.Elem().Kind() != reflect.Uint8 { + for i, n := 0, val.Len(); i < n; i++ { + if err := p.marshalValue(val.Index(i), finfo, startTemplate); err != nil { + return err + } + } + return nil + } + + tinfo, err := getTypeInfo(typ) + if err != nil { + return err + } + + // Create start element. + // Precedence for the XML element name is: + // 0. startTemplate + // 1. XMLName field in underlying struct; + // 2. field name/tag in the struct field; and + // 3. type name + var start StartElement + + // explicitNS records whether the element's name space has been + // explicitly set (for example an XMLName field). + explicitNS := false + + if startTemplate != nil { + start.Name = startTemplate.Name + explicitNS = true + start.Attr = append(start.Attr, startTemplate.Attr...) + } else if tinfo.xmlname != nil { + xmlname := tinfo.xmlname + if xmlname.name != "" { + start.Name.Space, start.Name.Local = xmlname.xmlns, xmlname.name + } else if v, ok := xmlname.value(val).Interface().(Name); ok && v.Local != "" { + start.Name = v + } + explicitNS = true + } + if start.Name.Local == "" && finfo != nil { + start.Name.Local = finfo.name + if finfo.xmlns != "" { + start.Name.Space = finfo.xmlns + explicitNS = true + } + } + if start.Name.Local == "" { + name := typ.Name() + if name == "" { + return &UnsupportedTypeError{typ} + } + start.Name.Local = name + } + + // defaultNS records the default name space as set by a xmlns="..." + // attribute. We don't set p.defaultNS because we want to let + // the attribute writing code (in p.defineNS) be solely responsible + // for maintaining that. + defaultNS := p.defaultNS + + // Attributes + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + if finfo.flags&fAttr == 0 { + continue + } + attr, err := p.fieldAttr(finfo, val) + if err != nil { + return err + } + if attr.Name.Local == "" { + continue + } + start.Attr = append(start.Attr, attr) + if attr.Name.Space == "" && attr.Name.Local == "xmlns" { + defaultNS = attr.Value + } + } + if !explicitNS { + // Historic behavior: elements use the default name space + // they are contained in by default. + start.Name.Space = defaultNS + } + // Historic behaviour: an element that's in a namespace sets + // the default namespace for all elements contained within it. 
+ start.setDefaultNamespace() + + if err := p.writeStart(&start); err != nil { + return err + } + + if val.Kind() == reflect.Struct { + err = p.marshalStruct(tinfo, val) + } else { + s, b, err1 := p.marshalSimple(typ, val) + if err1 != nil { + err = err1 + } else if b != nil { + EscapeText(p, b) + } else { + p.EscapeString(s) + } + } + if err != nil { + return err + } + + if err := p.writeEnd(start.Name); err != nil { + return err + } + + return p.cachedWriteError() +} + +// fieldAttr returns the attribute of the given field. +// If the returned attribute has an empty Name.Local, +// it should not be used. +// The given value holds the value containing the field. +func (p *printer) fieldAttr(finfo *fieldInfo, val reflect.Value) (Attr, error) { + fv := finfo.value(val) + name := Name{Space: finfo.xmlns, Local: finfo.name} + if finfo.flags&fOmitEmpty != 0 && isEmptyValue(fv) { + return Attr{}, nil + } + if fv.Kind() == reflect.Interface && fv.IsNil() { + return Attr{}, nil + } + if fv.CanInterface() && fv.Type().Implements(marshalerAttrType) { + attr, err := fv.Interface().(MarshalerAttr).MarshalXMLAttr(name) + return attr, err + } + if fv.CanAddr() { + pv := fv.Addr() + if pv.CanInterface() && pv.Type().Implements(marshalerAttrType) { + attr, err := pv.Interface().(MarshalerAttr).MarshalXMLAttr(name) + return attr, err + } + } + if fv.CanInterface() && fv.Type().Implements(textMarshalerType) { + text, err := fv.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return Attr{}, err + } + return Attr{name, string(text)}, nil + } + if fv.CanAddr() { + pv := fv.Addr() + if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { + text, err := pv.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return Attr{}, err + } + return Attr{name, string(text)}, nil + } + } + // Dereference or skip nil pointer, interface values. + switch fv.Kind() { + case reflect.Ptr, reflect.Interface: + if fv.IsNil() { + return Attr{}, nil + } + fv = fv.Elem() + } + s, b, err := p.marshalSimple(fv.Type(), fv) + if err != nil { + return Attr{}, err + } + if b != nil { + s = string(b) + } + return Attr{name, s}, nil +} + +// defaultStart returns the default start element to use, +// given the reflect type, field info, and start template. +func (p *printer) defaultStart(typ reflect.Type, finfo *fieldInfo, startTemplate *StartElement) StartElement { + var start StartElement + // Precedence for the XML element name is as above, + // except that we do not look inside structs for the first field. + if startTemplate != nil { + start.Name = startTemplate.Name + start.Attr = append(start.Attr, startTemplate.Attr...) + } else if finfo != nil && finfo.name != "" { + start.Name.Local = finfo.name + start.Name.Space = finfo.xmlns + } else if typ.Name() != "" { + start.Name.Local = typ.Name() + } else { + // Must be a pointer to a named type, + // since it has the Marshaler methods. + start.Name.Local = typ.Elem().Name() + } + // Historic behaviour: elements use the name space of + // the element they are contained in by default. + if start.Name.Space == "" { + start.Name.Space = p.defaultNS + } + start.setDefaultNamespace() + return start +} + +// marshalInterface marshals a Marshaler interface value. +func (p *printer) marshalInterface(val Marshaler, start StartElement) error { + // Push a marker onto the tag stack so that MarshalXML + // cannot close the XML tags that it did not open. 
+ p.tags = append(p.tags, Name{}) + n := len(p.tags) + + err := val.MarshalXML(p.encoder, start) + if err != nil { + return err + } + + // Make sure MarshalXML closed all its tags. p.tags[n-1] is the mark. + if len(p.tags) > n { + return fmt.Errorf("xml: %s.MarshalXML wrote invalid XML: <%s> not closed", receiverType(val), p.tags[len(p.tags)-1].Local) + } + p.tags = p.tags[:n-1] + return nil +} + +// marshalTextInterface marshals a TextMarshaler interface value. +func (p *printer) marshalTextInterface(val encoding.TextMarshaler, start StartElement) error { + if err := p.writeStart(&start); err != nil { + return err + } + text, err := val.MarshalText() + if err != nil { + return err + } + EscapeText(p, text) + return p.writeEnd(start.Name) +} + +// writeStart writes the given start element. +func (p *printer) writeStart(start *StartElement) error { + if start.Name.Local == "" { + return fmt.Errorf("xml: start tag with no name") + } + + p.tags = append(p.tags, start.Name) + p.markPrefix() + // Define any name spaces explicitly declared in the attributes. + // We do this as a separate pass so that explicitly declared prefixes + // will take precedence over implicitly declared prefixes + // regardless of the order of the attributes. + ignoreNonEmptyDefault := start.Name.Space == "" + for _, attr := range start.Attr { + if err := p.defineNS(attr, ignoreNonEmptyDefault); err != nil { + return err + } + } + // Define any new name spaces implied by the attributes. + for _, attr := range start.Attr { + name := attr.Name + // From http://www.w3.org/TR/xml-names11/#defaulting + // "Default namespace declarations do not apply directly + // to attribute names; the interpretation of unprefixed + // attributes is determined by the element on which they + // appear." + // This means we don't need to create a new namespace + // when an attribute name space is empty. + if name.Space != "" && !name.isNamespace() { + p.createNSPrefix(name.Space, true) + } + } + p.createNSPrefix(start.Name.Space, false) + + p.writeIndent(1) + p.WriteByte('<') + p.writeName(start.Name, false) + p.writeNamespaces() + for _, attr := range start.Attr { + name := attr.Name + if name.Local == "" || name.isNamespace() { + // Namespaces have already been written by writeNamespaces above. + continue + } + p.WriteByte(' ') + p.writeName(name, true) + p.WriteString(`="`) + p.EscapeString(attr.Value) + p.WriteByte('"') + } + p.WriteByte('>') + return nil +} + +// writeName writes the given name. It assumes +// that p.createNSPrefix(name) has already been called. 
+func (p *printer) writeName(name Name, isAttr bool) {
+	if prefix := p.prefixForNS(name.Space, isAttr); prefix != "" {
+		p.WriteString(prefix)
+		p.WriteByte(':')
+	}
+	p.WriteString(name.Local)
+}
+
+func (p *printer) writeEnd(name Name) error {
+	if name.Local == "" {
+		return fmt.Errorf("xml: end tag with no name")
+	}
+	if len(p.tags) == 0 || p.tags[len(p.tags)-1].Local == "" {
+		return fmt.Errorf("xml: end tag </%s> without start tag", name.Local)
+	}
+	if top := p.tags[len(p.tags)-1]; top != name {
+		if top.Local != name.Local {
+			return fmt.Errorf("xml: end tag </%s> does not match start tag <%s>", name.Local, top.Local)
+		}
+		return fmt.Errorf("xml: end tag </%s> in namespace %s does not match start tag <%s> in namespace %s", name.Local, name.Space, top.Local, top.Space)
+	}
+	p.tags = p.tags[:len(p.tags)-1]
+
+	p.writeIndent(-1)
+	p.WriteByte('<')
+	p.WriteByte('/')
+	p.writeName(name, false)
+	p.WriteByte('>')
+	p.popPrefix()
+	return nil
+}
+
+func (p *printer) marshalSimple(typ reflect.Type, val reflect.Value) (string, []byte, error) {
+	switch val.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return strconv.FormatInt(val.Int(), 10), nil, nil
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return strconv.FormatUint(val.Uint(), 10), nil, nil
+	case reflect.Float32, reflect.Float64:
+		return strconv.FormatFloat(val.Float(), 'g', -1, val.Type().Bits()), nil, nil
+	case reflect.String:
+		return val.String(), nil, nil
+	case reflect.Bool:
+		return strconv.FormatBool(val.Bool()), nil, nil
+	case reflect.Array:
+		if typ.Elem().Kind() != reflect.Uint8 {
+			break
+		}
+		// [...]byte
+		var bytes []byte
+		if val.CanAddr() {
+			bytes = val.Slice(0, val.Len()).Bytes()
+		} else {
+			bytes = make([]byte, val.Len())
+			reflect.Copy(reflect.ValueOf(bytes), val)
+		}
+		return "", bytes, nil
+	case reflect.Slice:
+		if typ.Elem().Kind() != reflect.Uint8 {
+			break
+		}
+		// []byte
+		return "", val.Bytes(), nil
+	}
+	return "", nil, &UnsupportedTypeError{typ}
+}
+
+var ddBytes = []byte("--")
+
+func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error {
+	s := parentStack{p: p}
+	for i := range tinfo.fields {
+		finfo := &tinfo.fields[i]
+		if finfo.flags&fAttr != 0 {
+			continue
+		}
+		vf := finfo.value(val)
+
+		// Dereference or skip nil pointer, interface values.
+		switch vf.Kind() {
+		case reflect.Ptr, reflect.Interface:
+			if !vf.IsNil() {
+				vf = vf.Elem()
+			}
+		}
+
+		switch finfo.flags & fMode {
+		case fCharData:
+			if err := s.setParents(&noField, reflect.Value{}); err != nil {
+				return err
+			}
+			if vf.CanInterface() && vf.Type().Implements(textMarshalerType) {
+				data, err := vf.Interface().(encoding.TextMarshaler).MarshalText()
+				if err != nil {
+					return err
+				}
+				Escape(p, data)
+				continue
+			}
+			if vf.CanAddr() {
+				pv := vf.Addr()
+				if pv.CanInterface() && pv.Type().Implements(textMarshalerType) {
+					data, err := pv.Interface().(encoding.TextMarshaler).MarshalText()
+					if err != nil {
+						return err
+					}
+					Escape(p, data)
+					continue
+				}
+			}
+			var scratch [64]byte
+			switch vf.Kind() {
+			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+				Escape(p, strconv.AppendInt(scratch[:0], vf.Int(), 10))
+			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+				Escape(p, strconv.AppendUint(scratch[:0], vf.Uint(), 10))
+			case reflect.Float32, reflect.Float64:
+				Escape(p, strconv.AppendFloat(scratch[:0], vf.Float(), 'g', -1, vf.Type().Bits()))
+			case reflect.Bool:
+				Escape(p, strconv.AppendBool(scratch[:0], vf.Bool()))
+			case reflect.String:
+				if err := EscapeText(p, []byte(vf.String())); err != nil {
+					return err
+				}
+			case reflect.Slice:
+				if elem, ok := vf.Interface().([]byte); ok {
+					if err := EscapeText(p, elem); err != nil {
+						return err
+					}
+				}
+			}
+			continue
+
+		case fComment:
+			if err := s.setParents(&noField, reflect.Value{}); err != nil {
+				return err
+			}
+			k := vf.Kind()
+			if !(k == reflect.String || k == reflect.Slice && vf.Type().Elem().Kind() == reflect.Uint8) {
+				return fmt.Errorf("xml: bad type for comment field of %s", val.Type())
+			}
+			if vf.Len() == 0 {
+				continue
+			}
+			p.writeIndent(0)
+			p.WriteString("<!--")
+			dashDash := false
+			dashLast := false
+			switch k {
+			case reflect.String:
+				s := vf.String()
+				dashDash = strings.Index(s, "--") >= 0
+				dashLast = s[len(s)-1] == '-'
+				if !dashDash {
+					p.WriteString(s)
+				}
+			case reflect.Slice:
+				b := vf.Bytes()
+				dashDash = bytes.Index(b, ddBytes) >= 0
+				dashLast = b[len(b)-1] == '-'
+				if !dashDash {
+					p.Write(b)
+				}
+			default:
+				panic("can't happen")
+			}
+			if dashDash {
+				return fmt.Errorf(`xml: comments must not contain "--"`)
+			}
+			if dashLast {
+				// "--->" is invalid grammar. Make it "- -->"
+				p.WriteByte(' ')
+			}
+			p.WriteString("-->")
+			continue
+
+		case fInnerXml:
+			iface := vf.Interface()
+			switch raw := iface.(type) {
+			case []byte:
+				p.Write(raw)
+				continue
+			case string:
+				p.WriteString(raw)
+				continue
+			}
+
+		case fElement, fElement | fAny:
+			if err := s.setParents(finfo, vf); err != nil {
+				return err
+			}
+		}
+		if err := p.marshalValue(vf, finfo, nil); err != nil {
+			return err
+		}
+	}
+	if err := s.setParents(&noField, reflect.Value{}); err != nil {
+		return err
+	}
+	return p.cachedWriteError()
+}
+
+var noField fieldInfo
+
+// return the bufio Writer's cached write error
+func (p *printer) cachedWriteError() error {
+	_, err := p.Write(nil)
+	return err
+}
+
+func (p *printer) writeIndent(depthDelta int) {
+	if len(p.prefix) == 0 && len(p.indent) == 0 {
+		return
+	}
+	if depthDelta < 0 {
+		p.depth--
+		if p.indentedIn {
+			p.indentedIn = false
+			return
+		}
+		p.indentedIn = false
+	}
+	if p.putNewline {
+		p.WriteByte('\n')
+	} else {
+		p.putNewline = true
+	}
+	if len(p.prefix) > 0 {
+		p.WriteString(p.prefix)
+	}
+	if len(p.indent) > 0 {
+		for i := 0; i < p.depth; i++ {
+			p.WriteString(p.indent)
+		}
+	}
+	if depthDelta > 0 {
+		p.depth++
+		p.indentedIn = true
+	}
+}
+
+type parentStack struct {
+	p       *printer
+	xmlns   string
+	parents []string
+}
+
+// setParents sets the stack of current parents to those found in finfo.
+// It only writes the start elements if vf holds a non-nil value.
+// If finfo is &noField, it pops all elements.
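+//
+// Illustrative note (an addition, not part of the original comment): for a
+// field tagged `xml:"a>b>c"`, finfo.parents is ["a", "b"], so consecutive
+// fields sharing those parents keep the enclosing <a><b> elements open rather
+// than closing and reopening them for every field.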
+func (s *parentStack) setParents(finfo *fieldInfo, vf reflect.Value) error { + xmlns := s.p.defaultNS + if finfo.xmlns != "" { + xmlns = finfo.xmlns + } + commonParents := 0 + if xmlns == s.xmlns { + for ; commonParents < len(finfo.parents) && commonParents < len(s.parents); commonParents++ { + if finfo.parents[commonParents] != s.parents[commonParents] { + break + } + } + } + // Pop off any parents that aren't in common with the previous field. + for i := len(s.parents) - 1; i >= commonParents; i-- { + if err := s.p.writeEnd(Name{ + Space: s.xmlns, + Local: s.parents[i], + }); err != nil { + return err + } + } + s.parents = finfo.parents + s.xmlns = xmlns + if commonParents >= len(s.parents) { + // No new elements to push. + return nil + } + if (vf.Kind() == reflect.Ptr || vf.Kind() == reflect.Interface) && vf.IsNil() { + // The element is nil, so no need for the start elements. + s.parents = s.parents[:commonParents] + return nil + } + // Push any new parents required. + for _, name := range s.parents[commonParents:] { + start := &StartElement{ + Name: Name{ + Space: s.xmlns, + Local: name, + }, + } + // Set the default name space for parent elements + // to match what we do with other elements. + if s.xmlns != s.p.defaultNS { + start.setDefaultNamespace() + } + if err := s.p.writeStart(start); err != nil { + return err + } + } + return nil +} + +// A MarshalXMLError is returned when Marshal encounters a type +// that cannot be converted into XML. +type UnsupportedTypeError struct { + Type reflect.Type +} + +func (e *UnsupportedTypeError) Error() string { + return "xml: unsupported type: " + e.Type.String() +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/read.go b/vendor/golang.org/x/net/webdav/internal/xml/read.go new file mode 100644 index 0000000..4089056 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/read.go @@ -0,0 +1,692 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "bytes" + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" +) + +// BUG(rsc): Mapping between XML elements and data structures is inherently flawed: +// an XML element is an order-dependent collection of anonymous +// values, while a data structure is an order-independent collection +// of named values. +// See package json for a textual representation more suitable +// to data structures. + +// Unmarshal parses the XML-encoded data and stores the result in +// the value pointed to by v, which must be an arbitrary struct, +// slice, or string. Well-formed data that does not fit into v is +// discarded. +// +// Because Unmarshal uses the reflect package, it can only assign +// to exported (upper case) fields. Unmarshal uses a case-sensitive +// comparison to match XML element names to tag values and struct +// field names. 
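+//
+// A hedged illustration (not part of the original comment): given
+//
+//	type Address struct {
+//		City, State string
+//	}
+//
+// the element <Address><City>Austin</City><State>TX</State></Address>
+// unmarshals into Address{City: "Austin", State: "TX"}.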
+// +// Unmarshal maps an XML element to a struct using the following rules. +// In the rules, the tag of a field refers to the value associated with the +// key 'xml' in the struct field's tag (see the example above). +// +// * If the struct has a field of type []byte or string with tag +// ",innerxml", Unmarshal accumulates the raw XML nested inside the +// element in that field. The rest of the rules still apply. +// +// * If the struct has a field named XMLName of type xml.Name, +// Unmarshal records the element name in that field. +// +// * If the XMLName field has an associated tag of the form +// "name" or "namespace-URL name", the XML element must have +// the given name (and, optionally, name space) or else Unmarshal +// returns an error. +// +// * If the XML element has an attribute whose name matches a +// struct field name with an associated tag containing ",attr" or +// the explicit name in a struct field tag of the form "name,attr", +// Unmarshal records the attribute value in that field. +// +// * If the XML element contains character data, that data is +// accumulated in the first struct field that has tag ",chardata". +// The struct field may have type []byte or string. +// If there is no such field, the character data is discarded. +// +// * If the XML element contains comments, they are accumulated in +// the first struct field that has tag ",comment". The struct +// field may have type []byte or string. If there is no such +// field, the comments are discarded. +// +// * If the XML element contains a sub-element whose name matches +// the prefix of a tag formatted as "a" or "a>b>c", unmarshal +// will descend into the XML structure looking for elements with the +// given names, and will map the innermost elements to that struct +// field. A tag starting with ">" is equivalent to one starting +// with the field name followed by ">". +// +// * If the XML element contains a sub-element whose name matches +// a struct field's XMLName tag and the struct field has no +// explicit name tag as per the previous rule, unmarshal maps +// the sub-element to that struct field. +// +// * If the XML element contains a sub-element whose name matches a +// field without any mode flags (",attr", ",chardata", etc), Unmarshal +// maps the sub-element to that struct field. +// +// * If the XML element contains a sub-element that hasn't matched any +// of the above rules and the struct has a field with tag ",any", +// unmarshal maps the sub-element to that struct field. +// +// * An anonymous struct field is handled as if the fields of its +// value were part of the outer struct. +// +// * A struct field with tag "-" is never unmarshalled into. +// +// Unmarshal maps an XML element to a string or []byte by saving the +// concatenation of that element's character data in the string or +// []byte. The saved []byte is never nil. +// +// Unmarshal maps an attribute value to a string or []byte by saving +// the value in the string or slice. +// +// Unmarshal maps an XML element to a slice by extending the length of +// the slice and mapping the element to the newly created value. +// +// Unmarshal maps an XML element or attribute value to a bool by +// setting it to the boolean value represented by the string. +// +// Unmarshal maps an XML element or attribute value to an integer or +// floating-point field by setting the field to the result of +// interpreting the string value in decimal. There is no check for +// overflow. 
+// +// Unmarshal maps an XML element to an xml.Name by recording the +// element name. +// +// Unmarshal maps an XML element to a pointer by setting the pointer +// to a freshly allocated value and then mapping the element to that value. +// +func Unmarshal(data []byte, v interface{}) error { + return NewDecoder(bytes.NewReader(data)).Decode(v) +} + +// Decode works like xml.Unmarshal, except it reads the decoder +// stream to find the start element. +func (d *Decoder) Decode(v interface{}) error { + return d.DecodeElement(v, nil) +} + +// DecodeElement works like xml.Unmarshal except that it takes +// a pointer to the start XML element to decode into v. +// It is useful when a client reads some raw XML tokens itself +// but also wants to defer to Unmarshal for some elements. +func (d *Decoder) DecodeElement(v interface{}, start *StartElement) error { + val := reflect.ValueOf(v) + if val.Kind() != reflect.Ptr { + return errors.New("non-pointer passed to Unmarshal") + } + return d.unmarshal(val.Elem(), start) +} + +// An UnmarshalError represents an error in the unmarshalling process. +type UnmarshalError string + +func (e UnmarshalError) Error() string { return string(e) } + +// Unmarshaler is the interface implemented by objects that can unmarshal +// an XML element description of themselves. +// +// UnmarshalXML decodes a single XML element +// beginning with the given start element. +// If it returns an error, the outer call to Unmarshal stops and +// returns that error. +// UnmarshalXML must consume exactly one XML element. +// One common implementation strategy is to unmarshal into +// a separate value with a layout matching the expected XML +// using d.DecodeElement, and then to copy the data from +// that value into the receiver. +// Another common strategy is to use d.Token to process the +// XML object one token at a time. +// UnmarshalXML may not use d.RawToken. +type Unmarshaler interface { + UnmarshalXML(d *Decoder, start StartElement) error +} + +// UnmarshalerAttr is the interface implemented by objects that can unmarshal +// an XML attribute description of themselves. +// +// UnmarshalXMLAttr decodes a single XML attribute. +// If it returns an error, the outer call to Unmarshal stops and +// returns that error. +// UnmarshalXMLAttr is used only for struct fields with the +// "attr" option in the field tag. +type UnmarshalerAttr interface { + UnmarshalXMLAttr(attr Attr) error +} + +// receiverType returns the receiver type to use in an expression like "%s.MethodName". +func receiverType(val interface{}) string { + t := reflect.TypeOf(val) + if t.Name() != "" { + return t.String() + } + return "(" + t.String() + ")" +} + +// unmarshalInterface unmarshals a single XML element into val. +// start is the opening tag of the element. +func (p *Decoder) unmarshalInterface(val Unmarshaler, start *StartElement) error { + // Record that decoder must stop at end tag corresponding to start. + p.pushEOF() + + p.unmarshalDepth++ + err := val.UnmarshalXML(p, *start) + p.unmarshalDepth-- + if err != nil { + p.popEOF() + return err + } + + if !p.popEOF() { + return fmt.Errorf("xml: %s.UnmarshalXML did not consume entire <%s> element", receiverType(val), start.Name.Local) + } + + return nil +} + +// unmarshalTextInterface unmarshals a single XML element into val. +// The chardata contained in the element (but not its children) +// is passed to the text unmarshaler. 
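+// As an illustrative note, this is the path taken for a field whose type
+// implements encoding.TextUnmarshaler, e.g. time.Time (via *time.Time):
+// the element's own character data is handed to UnmarshalText, while any
+// markup nested inside the element is consumed and discarded rather than
+// flattened into the text.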
+func (p *Decoder) unmarshalTextInterface(val encoding.TextUnmarshaler, start *StartElement) error { + var buf []byte + depth := 1 + for depth > 0 { + t, err := p.Token() + if err != nil { + return err + } + switch t := t.(type) { + case CharData: + if depth == 1 { + buf = append(buf, t...) + } + case StartElement: + depth++ + case EndElement: + depth-- + } + } + return val.UnmarshalText(buf) +} + +// unmarshalAttr unmarshals a single XML attribute into val. +func (p *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error { + if val.Kind() == reflect.Ptr { + if val.IsNil() { + val.Set(reflect.New(val.Type().Elem())) + } + val = val.Elem() + } + + if val.CanInterface() && val.Type().Implements(unmarshalerAttrType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. + return val.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(unmarshalerAttrType) { + return pv.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr) + } + } + + // Not an UnmarshalerAttr; try encoding.TextUnmarshaler. + if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. + return val.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value)) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + return pv.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value)) + } + } + + copyValue(val, []byte(attr.Value)) + return nil +} + +var ( + unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + unmarshalerAttrType = reflect.TypeOf((*UnmarshalerAttr)(nil)).Elem() + textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +) + +// Unmarshal a single XML element into val. +func (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error { + // Find start element if we need it. + if start == nil { + for { + tok, err := p.Token() + if err != nil { + return err + } + if t, ok := tok.(StartElement); ok { + start = &t + break + } + } + } + + // Load value from interface, but only if the result will be + // usefully addressable. + if val.Kind() == reflect.Interface && !val.IsNil() { + e := val.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() { + val = e + } + } + + if val.Kind() == reflect.Ptr { + if val.IsNil() { + val.Set(reflect.New(val.Type().Elem())) + } + val = val.Elem() + } + + if val.CanInterface() && val.Type().Implements(unmarshalerType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. 
+ return p.unmarshalInterface(val.Interface().(Unmarshaler), start) + } + + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(unmarshalerType) { + return p.unmarshalInterface(pv.Interface().(Unmarshaler), start) + } + } + + if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { + return p.unmarshalTextInterface(val.Interface().(encoding.TextUnmarshaler), start) + } + + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + return p.unmarshalTextInterface(pv.Interface().(encoding.TextUnmarshaler), start) + } + } + + var ( + data []byte + saveData reflect.Value + comment []byte + saveComment reflect.Value + saveXML reflect.Value + saveXMLIndex int + saveXMLData []byte + saveAny reflect.Value + sv reflect.Value + tinfo *typeInfo + err error + ) + + switch v := val; v.Kind() { + default: + return errors.New("unknown type " + v.Type().String()) + + case reflect.Interface: + // TODO: For now, simply ignore the field. In the near + // future we may choose to unmarshal the start + // element on it, if not nil. + return p.Skip() + + case reflect.Slice: + typ := v.Type() + if typ.Elem().Kind() == reflect.Uint8 { + // []byte + saveData = v + break + } + + // Slice of element values. + // Grow slice. + n := v.Len() + if n >= v.Cap() { + ncap := 2 * n + if ncap < 4 { + ncap = 4 + } + new := reflect.MakeSlice(typ, n, ncap) + reflect.Copy(new, v) + v.Set(new) + } + v.SetLen(n + 1) + + // Recur to read element into slice. + if err := p.unmarshal(v.Index(n), start); err != nil { + v.SetLen(n) + return err + } + return nil + + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.String: + saveData = v + + case reflect.Struct: + typ := v.Type() + if typ == nameType { + v.Set(reflect.ValueOf(start.Name)) + break + } + + sv = v + tinfo, err = getTypeInfo(typ) + if err != nil { + return err + } + + // Validate and assign element name. + if tinfo.xmlname != nil { + finfo := tinfo.xmlname + if finfo.name != "" && finfo.name != start.Name.Local { + return UnmarshalError("expected element type <" + finfo.name + "> but have <" + start.Name.Local + ">") + } + if finfo.xmlns != "" && finfo.xmlns != start.Name.Space { + e := "expected element <" + finfo.name + "> in name space " + finfo.xmlns + " but have " + if start.Name.Space == "" { + e += "no name space" + } else { + e += start.Name.Space + } + return UnmarshalError(e) + } + fv := finfo.value(sv) + if _, ok := fv.Interface().(Name); ok { + fv.Set(reflect.ValueOf(start.Name)) + } + } + + // Assign attributes. + // Also, determine whether we need to save character data or comments. + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + switch finfo.flags & fMode { + case fAttr: + strv := finfo.value(sv) + // Look for attribute. 
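+				// Note: a field tagged `xml:"name,attr"` matches the first
+				// attribute whose local name is "name"; if the tag also names
+				// a space ("url name,attr"), the attribute's space must match
+				// as well. When no attribute matches, the field is simply
+				// left at its zero value.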
+ for _, a := range start.Attr { + if a.Name.Local == finfo.name && (finfo.xmlns == "" || finfo.xmlns == a.Name.Space) { + if err := p.unmarshalAttr(strv, a); err != nil { + return err + } + break + } + } + + case fCharData: + if !saveData.IsValid() { + saveData = finfo.value(sv) + } + + case fComment: + if !saveComment.IsValid() { + saveComment = finfo.value(sv) + } + + case fAny, fAny | fElement: + if !saveAny.IsValid() { + saveAny = finfo.value(sv) + } + + case fInnerXml: + if !saveXML.IsValid() { + saveXML = finfo.value(sv) + if p.saved == nil { + saveXMLIndex = 0 + p.saved = new(bytes.Buffer) + } else { + saveXMLIndex = p.savedOffset() + } + } + } + } + } + + // Find end element. + // Process sub-elements along the way. +Loop: + for { + var savedOffset int + if saveXML.IsValid() { + savedOffset = p.savedOffset() + } + tok, err := p.Token() + if err != nil { + return err + } + switch t := tok.(type) { + case StartElement: + consumed := false + if sv.IsValid() { + consumed, err = p.unmarshalPath(tinfo, sv, nil, &t) + if err != nil { + return err + } + if !consumed && saveAny.IsValid() { + consumed = true + if err := p.unmarshal(saveAny, &t); err != nil { + return err + } + } + } + if !consumed { + if err := p.Skip(); err != nil { + return err + } + } + + case EndElement: + if saveXML.IsValid() { + saveXMLData = p.saved.Bytes()[saveXMLIndex:savedOffset] + if saveXMLIndex == 0 { + p.saved = nil + } + } + break Loop + + case CharData: + if saveData.IsValid() { + data = append(data, t...) + } + + case Comment: + if saveComment.IsValid() { + comment = append(comment, t...) + } + } + } + + if saveData.IsValid() && saveData.CanInterface() && saveData.Type().Implements(textUnmarshalerType) { + if err := saveData.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { + return err + } + saveData = reflect.Value{} + } + + if saveData.IsValid() && saveData.CanAddr() { + pv := saveData.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + if err := pv.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { + return err + } + saveData = reflect.Value{} + } + } + + if err := copyValue(saveData, data); err != nil { + return err + } + + switch t := saveComment; t.Kind() { + case reflect.String: + t.SetString(string(comment)) + case reflect.Slice: + t.Set(reflect.ValueOf(comment)) + } + + switch t := saveXML; t.Kind() { + case reflect.String: + t.SetString(string(saveXMLData)) + case reflect.Slice: + t.Set(reflect.ValueOf(saveXMLData)) + } + + return nil +} + +func copyValue(dst reflect.Value, src []byte) (err error) { + dst0 := dst + + if dst.Kind() == reflect.Ptr { + if dst.IsNil() { + dst.Set(reflect.New(dst.Type().Elem())) + } + dst = dst.Elem() + } + + // Save accumulated data. + switch dst.Kind() { + case reflect.Invalid: + // Probably a comment. 
+ default: + return errors.New("cannot unmarshal into " + dst0.Type().String()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + itmp, err := strconv.ParseInt(string(src), 10, dst.Type().Bits()) + if err != nil { + return err + } + dst.SetInt(itmp) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + utmp, err := strconv.ParseUint(string(src), 10, dst.Type().Bits()) + if err != nil { + return err + } + dst.SetUint(utmp) + case reflect.Float32, reflect.Float64: + ftmp, err := strconv.ParseFloat(string(src), dst.Type().Bits()) + if err != nil { + return err + } + dst.SetFloat(ftmp) + case reflect.Bool: + value, err := strconv.ParseBool(strings.TrimSpace(string(src))) + if err != nil { + return err + } + dst.SetBool(value) + case reflect.String: + dst.SetString(string(src)) + case reflect.Slice: + if len(src) == 0 { + // non-nil to flag presence + src = []byte{} + } + dst.SetBytes(src) + } + return nil +} + +// unmarshalPath walks down an XML structure looking for wanted +// paths, and calls unmarshal on them. +// The consumed result tells whether XML elements have been consumed +// from the Decoder until start's matching end element, or if it's +// still untouched because start is uninteresting for sv's fields. +func (p *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) { + recurse := false +Loop: + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + if finfo.flags&fElement == 0 || len(finfo.parents) < len(parents) || finfo.xmlns != "" && finfo.xmlns != start.Name.Space { + continue + } + for j := range parents { + if parents[j] != finfo.parents[j] { + continue Loop + } + } + if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local { + // It's a perfect match, unmarshal the field. + return true, p.unmarshal(finfo.value(sv), start) + } + if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local { + // It's a prefix for the field. Break and recurse + // since it's not ok for one field path to be itself + // the prefix for another field path. + recurse = true + + // We can reuse the same slice as long as we + // don't try to append to it. + parents = finfo.parents[:len(parents)+1] + break + } + } + if !recurse { + // We have no business with this element. + return false, nil + } + // The element is not a perfect match for any field, but one + // or more fields have the path to this element as a parent + // prefix. Recurse and attempt to match these. + for { + var tok Token + tok, err = p.Token() + if err != nil { + return true, err + } + switch t := tok.(type) { + case StartElement: + consumed2, err := p.unmarshalPath(tinfo, sv, parents, &t) + if err != nil { + return true, err + } + if !consumed2 { + if err := p.Skip(); err != nil { + return true, err + } + } + case EndElement: + return true, nil + } + } +} + +// Skip reads tokens until it has consumed the end element +// matching the most recent start element already consumed. +// It recurs if it encounters a start element, so it can be used to +// skip nested structures. +// It returns nil if it finds an end element matching the start +// element; otherwise it returns an error describing the problem. 
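+//
+// A typical use (an illustrative sketch; "item" is a placeholder name, and
+// tok is assumed to come from d.Token()) inside a custom UnmarshalXML is to
+// discard sub-elements that are not of interest:
+//
+//	switch t := tok.(type) {
+//	case StartElement:
+//		if t.Name.Local == "item" {
+//			// handle the element, e.g. with d.DecodeElement
+//		} else if err := d.Skip(); err != nil {
+//			return err
+//		}
+//	case EndElement:
+//		return nil
+//	}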
+func (d *Decoder) Skip() error { + for { + tok, err := d.Token() + if err != nil { + return err + } + switch tok.(type) { + case StartElement: + if err := d.Skip(); err != nil { + return err + } + case EndElement: + return nil + } + } +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go b/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go new file mode 100644 index 0000000..fdde288 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go @@ -0,0 +1,371 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "fmt" + "reflect" + "strings" + "sync" +) + +// typeInfo holds details for the xml representation of a type. +type typeInfo struct { + xmlname *fieldInfo + fields []fieldInfo +} + +// fieldInfo holds details for the xml representation of a single field. +type fieldInfo struct { + idx []int + name string + xmlns string + flags fieldFlags + parents []string +} + +type fieldFlags int + +const ( + fElement fieldFlags = 1 << iota + fAttr + fCharData + fInnerXml + fComment + fAny + + fOmitEmpty + + fMode = fElement | fAttr | fCharData | fInnerXml | fComment | fAny +) + +var tinfoMap = make(map[reflect.Type]*typeInfo) +var tinfoLock sync.RWMutex + +var nameType = reflect.TypeOf(Name{}) + +// getTypeInfo returns the typeInfo structure with details necessary +// for marshalling and unmarshalling typ. +func getTypeInfo(typ reflect.Type) (*typeInfo, error) { + tinfoLock.RLock() + tinfo, ok := tinfoMap[typ] + tinfoLock.RUnlock() + if ok { + return tinfo, nil + } + tinfo = &typeInfo{} + if typ.Kind() == reflect.Struct && typ != nameType { + n := typ.NumField() + for i := 0; i < n; i++ { + f := typ.Field(i) + if f.PkgPath != "" || f.Tag.Get("xml") == "-" { + continue // Private field + } + + // For embedded structs, embed its fields. + if f.Anonymous { + t := f.Type + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() == reflect.Struct { + inner, err := getTypeInfo(t) + if err != nil { + return nil, err + } + if tinfo.xmlname == nil { + tinfo.xmlname = inner.xmlname + } + for _, finfo := range inner.fields { + finfo.idx = append([]int{i}, finfo.idx...) + if err := addFieldInfo(typ, tinfo, &finfo); err != nil { + return nil, err + } + } + continue + } + } + + finfo, err := structFieldInfo(typ, &f) + if err != nil { + return nil, err + } + + if f.Name == "XMLName" { + tinfo.xmlname = finfo + continue + } + + // Add the field if it doesn't conflict with other fields. + if err := addFieldInfo(typ, tinfo, finfo); err != nil { + return nil, err + } + } + } + tinfoLock.Lock() + tinfoMap[typ] = tinfo + tinfoLock.Unlock() + return tinfo, nil +} + +// structFieldInfo builds and returns a fieldInfo for f. +func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, error) { + finfo := &fieldInfo{idx: f.Index} + + // Split the tag from the xml namespace if necessary. + tag := f.Tag.Get("xml") + if i := strings.Index(tag, " "); i >= 0 { + finfo.xmlns, tag = tag[:i], tag[i+1:] + } + + // Parse flags. 
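+	// Note: at this point any "xmlns " prefix has already been split off.
+	// For a hypothetical tag `xml:"http://example.com/ns name,attr,omitempty"`,
+	// finfo.xmlns is "http://example.com/ns" and tag is "name,attr,omitempty";
+	// the comma-separated items after the name are folded into flag bits below.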
+ tokens := strings.Split(tag, ",") + if len(tokens) == 1 { + finfo.flags = fElement + } else { + tag = tokens[0] + for _, flag := range tokens[1:] { + switch flag { + case "attr": + finfo.flags |= fAttr + case "chardata": + finfo.flags |= fCharData + case "innerxml": + finfo.flags |= fInnerXml + case "comment": + finfo.flags |= fComment + case "any": + finfo.flags |= fAny + case "omitempty": + finfo.flags |= fOmitEmpty + } + } + + // Validate the flags used. + valid := true + switch mode := finfo.flags & fMode; mode { + case 0: + finfo.flags |= fElement + case fAttr, fCharData, fInnerXml, fComment, fAny: + if f.Name == "XMLName" || tag != "" && mode != fAttr { + valid = false + } + default: + // This will also catch multiple modes in a single field. + valid = false + } + if finfo.flags&fMode == fAny { + finfo.flags |= fElement + } + if finfo.flags&fOmitEmpty != 0 && finfo.flags&(fElement|fAttr) == 0 { + valid = false + } + if !valid { + return nil, fmt.Errorf("xml: invalid tag in field %s of type %s: %q", + f.Name, typ, f.Tag.Get("xml")) + } + } + + // Use of xmlns without a name is not allowed. + if finfo.xmlns != "" && tag == "" { + return nil, fmt.Errorf("xml: namespace without name in field %s of type %s: %q", + f.Name, typ, f.Tag.Get("xml")) + } + + if f.Name == "XMLName" { + // The XMLName field records the XML element name. Don't + // process it as usual because its name should default to + // empty rather than to the field name. + finfo.name = tag + return finfo, nil + } + + if tag == "" { + // If the name part of the tag is completely empty, get + // default from XMLName of underlying struct if feasible, + // or field name otherwise. + if xmlname := lookupXMLName(f.Type); xmlname != nil { + finfo.xmlns, finfo.name = xmlname.xmlns, xmlname.name + } else { + finfo.name = f.Name + } + return finfo, nil + } + + if finfo.xmlns == "" && finfo.flags&fAttr == 0 { + // If it's an element no namespace specified, get the default + // from the XMLName of enclosing struct if possible. + if xmlname := lookupXMLName(typ); xmlname != nil { + finfo.xmlns = xmlname.xmlns + } + } + + // Prepare field name and parents. + parents := strings.Split(tag, ">") + if parents[0] == "" { + parents[0] = f.Name + } + if parents[len(parents)-1] == "" { + return nil, fmt.Errorf("xml: trailing '>' in field %s of type %s", f.Name, typ) + } + finfo.name = parents[len(parents)-1] + if len(parents) > 1 { + if (finfo.flags & fElement) == 0 { + return nil, fmt.Errorf("xml: %s chain not valid with %s flag", tag, strings.Join(tokens[1:], ",")) + } + finfo.parents = parents[:len(parents)-1] + } + + // If the field type has an XMLName field, the names must match + // so that the behavior of both marshalling and unmarshalling + // is straightforward and unambiguous. + if finfo.flags&fElement != 0 { + ftyp := f.Type + xmlname := lookupXMLName(ftyp) + if xmlname != nil && xmlname.name != finfo.name { + return nil, fmt.Errorf("xml: name %q in tag of %s.%s conflicts with name %q in %s.XMLName", + finfo.name, typ, f.Name, xmlname.name, ftyp) + } + } + return finfo, nil +} + +// lookupXMLName returns the fieldInfo for typ's XMLName field +// in case it exists and has a valid xml field tag, otherwise +// it returns nil. 
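+// As an illustration, for a hypothetical type
+//
+//	type Person struct {
+//		XMLName Name `xml:"person"`
+//	}
+//
+// it returns a fieldInfo whose name is "person"; structFieldInfo uses that
+// to default a field's element name and name space from its type's XMLName.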
+func lookupXMLName(typ reflect.Type) (xmlname *fieldInfo) { + for typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + if typ.Kind() != reflect.Struct { + return nil + } + for i, n := 0, typ.NumField(); i < n; i++ { + f := typ.Field(i) + if f.Name != "XMLName" { + continue + } + finfo, err := structFieldInfo(typ, &f) + if finfo.name != "" && err == nil { + return finfo + } + // Also consider errors as a non-existent field tag + // and let getTypeInfo itself report the error. + break + } + return nil +} + +func min(a, b int) int { + if a <= b { + return a + } + return b +} + +// addFieldInfo adds finfo to tinfo.fields if there are no +// conflicts, or if conflicts arise from previous fields that were +// obtained from deeper embedded structures than finfo. In the latter +// case, the conflicting entries are dropped. +// A conflict occurs when the path (parent + name) to a field is +// itself a prefix of another path, or when two paths match exactly. +// It is okay for field paths to share a common, shorter prefix. +func addFieldInfo(typ reflect.Type, tinfo *typeInfo, newf *fieldInfo) error { + var conflicts []int +Loop: + // First, figure all conflicts. Most working code will have none. + for i := range tinfo.fields { + oldf := &tinfo.fields[i] + if oldf.flags&fMode != newf.flags&fMode { + continue + } + if oldf.xmlns != "" && newf.xmlns != "" && oldf.xmlns != newf.xmlns { + continue + } + minl := min(len(newf.parents), len(oldf.parents)) + for p := 0; p < minl; p++ { + if oldf.parents[p] != newf.parents[p] { + continue Loop + } + } + if len(oldf.parents) > len(newf.parents) { + if oldf.parents[len(newf.parents)] == newf.name { + conflicts = append(conflicts, i) + } + } else if len(oldf.parents) < len(newf.parents) { + if newf.parents[len(oldf.parents)] == oldf.name { + conflicts = append(conflicts, i) + } + } else { + if newf.name == oldf.name { + conflicts = append(conflicts, i) + } + } + } + // Without conflicts, add the new field and return. + if conflicts == nil { + tinfo.fields = append(tinfo.fields, *newf) + return nil + } + + // If any conflict is shallower, ignore the new field. + // This matches the Go field resolution on embedding. + for _, i := range conflicts { + if len(tinfo.fields[i].idx) < len(newf.idx) { + return nil + } + } + + // Otherwise, if any of them is at the same depth level, it's an error. + for _, i := range conflicts { + oldf := &tinfo.fields[i] + if len(oldf.idx) == len(newf.idx) { + f1 := typ.FieldByIndex(oldf.idx) + f2 := typ.FieldByIndex(newf.idx) + return &TagPathError{typ, f1.Name, f1.Tag.Get("xml"), f2.Name, f2.Tag.Get("xml")} + } + } + + // Otherwise, the new field is shallower, and thus takes precedence, + // so drop the conflicting fields from tinfo and append the new one. + for c := len(conflicts) - 1; c >= 0; c-- { + i := conflicts[c] + copy(tinfo.fields[i:], tinfo.fields[i+1:]) + tinfo.fields = tinfo.fields[:len(tinfo.fields)-1] + } + tinfo.fields = append(tinfo.fields, *newf) + return nil +} + +// A TagPathError represents an error in the unmarshalling process +// caused by the use of field tags with conflicting paths. +type TagPathError struct { + Struct reflect.Type + Field1, Tag1 string + Field2, Tag2 string +} + +func (e *TagPathError) Error() string { + return fmt.Sprintf("%s field %q with tag %q conflicts with field %q with tag %q", e.Struct, e.Field1, e.Tag1, e.Field2, e.Tag2) +} + +// value returns v's field value corresponding to finfo. 
+// It's equivalent to v.FieldByIndex(finfo.idx), but initializes +// and dereferences pointers as necessary. +func (finfo *fieldInfo) value(v reflect.Value) reflect.Value { + for i, x := range finfo.idx { + if i > 0 { + t := v.Type() + if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + } + v = v.Field(x) + } + return v +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/xml.go b/vendor/golang.org/x/net/webdav/internal/xml/xml.go new file mode 100644 index 0000000..5b79cbe --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/xml.go @@ -0,0 +1,1998 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xml implements a simple XML 1.0 parser that +// understands XML name spaces. +package xml + +// References: +// Annotated XML spec: http://www.xml.com/axml/testaxml.htm +// XML name spaces: http://www.w3.org/TR/REC-xml-names/ + +// TODO(rsc): +// Test error handling. + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// A SyntaxError represents a syntax error in the XML input stream. +type SyntaxError struct { + Msg string + Line int +} + +func (e *SyntaxError) Error() string { + return "XML syntax error on line " + strconv.Itoa(e.Line) + ": " + e.Msg +} + +// A Name represents an XML name (Local) annotated with a name space +// identifier (Space). In tokens returned by Decoder.Token, the Space +// identifier is given as a canonical URL, not the short prefix used in +// the document being parsed. +// +// As a special case, XML namespace declarations will use the literal +// string "xmlns" for the Space field instead of the fully resolved URL. +// See Encoder.EncodeToken for more information on namespace encoding +// behaviour. +type Name struct { + Space, Local string +} + +// isNamespace reports whether the name is a namespace-defining name. +func (name Name) isNamespace() bool { + return name.Local == "xmlns" || name.Space == "xmlns" +} + +// An Attr represents an attribute in an XML element (Name=Value). +type Attr struct { + Name Name + Value string +} + +// A Token is an interface holding one of the token types: +// StartElement, EndElement, CharData, Comment, ProcInst, or Directive. +type Token interface{} + +// A StartElement represents an XML start element. +type StartElement struct { + Name Name + Attr []Attr +} + +func (e StartElement) Copy() StartElement { + attrs := make([]Attr, len(e.Attr)) + copy(attrs, e.Attr) + e.Attr = attrs + return e +} + +// End returns the corresponding XML end element. +func (e StartElement) End() EndElement { + return EndElement{e.Name} +} + +// setDefaultNamespace sets the namespace of the element +// as the default for all elements contained within it. +func (e *StartElement) setDefaultNamespace() { + if e.Name.Space == "" { + // If there's no namespace on the element, don't + // set the default. Strictly speaking this might be wrong, as + // we can't tell if the element had no namespace set + // or was just using the default namespace. + return + } + // Don't add a default name space if there's already one set. 
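+	// Note: the attribute appended at the end of this function is the
+	// declaration xmlns="<this element's space>", which makes the element's
+	// own space the default for it and its children when the token is
+	// encoded again.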
+	for _, attr := range e.Attr {
+		if attr.Name.Space == "" && attr.Name.Local == "xmlns" {
+			return
+		}
+	}
+	e.Attr = append(e.Attr, Attr{
+		Name: Name{
+			Local: "xmlns",
+		},
+		Value: e.Name.Space,
+	})
+}
+
+// An EndElement represents an XML end element.
+type EndElement struct {
+	Name Name
+}
+
+// A CharData represents XML character data (raw text),
+// in which XML escape sequences have been replaced by
+// the characters they represent.
+type CharData []byte
+
+func makeCopy(b []byte) []byte {
+	b1 := make([]byte, len(b))
+	copy(b1, b)
+	return b1
+}
+
+func (c CharData) Copy() CharData { return CharData(makeCopy(c)) }
+
+// A Comment represents an XML comment of the form <!--comment-->.
+// The bytes do not include the <!-- and --> comment markers.
+type Comment []byte
+
+func (c Comment) Copy() Comment { return Comment(makeCopy(c)) }
+
+// A ProcInst represents an XML processing instruction of the form <?target inst?>
+type ProcInst struct {
+	Target string
+	Inst   []byte
+}
+
+func (p ProcInst) Copy() ProcInst {
+	p.Inst = makeCopy(p.Inst)
+	return p
+}
+
+// A Directive represents an XML directive of the form <!text>.
+// The bytes do not include the <! and > markers.
+type Directive []byte
+
+func (d Directive) Copy() Directive { return Directive(makeCopy(d)) }
+
+// CopyToken returns a copy of a Token.
+func CopyToken(t Token) Token {
+	switch v := t.(type) {
+	case CharData:
+		return v.Copy()
+	case Comment:
+		return v.Copy()
+	case Directive:
+		return v.Copy()
+	case ProcInst:
+		return v.Copy()
+	case StartElement:
+		return v.Copy()
+	}
+	return t
+}
+
+// A Decoder represents an XML parser reading a particular input stream.
+// The parser assumes that its input is encoded in UTF-8.
+type Decoder struct {
+	// Strict defaults to true, enforcing the requirements
+	// of the XML specification.
+	// If set to false, the parser allows input containing common
+	// mistakes:
+	//	* If an element is missing an end tag, the parser invents
+	//	  end tags as necessary to keep the return values from Token
+	//	  properly balanced.
+	//	* In attribute values and character data, unknown or malformed
+	//	  character entities (sequences beginning with &) are left alone.
+	//
+	// Setting:
+	//
+	//	d.Strict = false;
+	//	d.AutoClose = HTMLAutoClose;
+	//	d.Entity = HTMLEntity
+	//
+	// creates a parser that can handle typical HTML.
+	//
+	// Strict mode does not enforce the requirements of the XML name spaces TR.
+	// In particular it does not reject name space tags using undefined prefixes.
+	// Such tags are recorded with the unknown prefix as the name space URL.
+	Strict bool
+
+	// When Strict == false, AutoClose indicates a set of elements to
+	// consider closed immediately after they are opened, regardless
+	// of whether an end element is present.
+	AutoClose []string
+
+	// Entity can be used to map non-standard entity names to string replacements.
+	// The parser behaves as if these standard mappings are present in the map,
+	// regardless of the actual map content:
+	//
+	//	"lt": "<",
+	//	"gt": ">",
+	//	"amp": "&",
+	//	"apos": "'",
+	//	"quot": `"`,
+	Entity map[string]string
+
+	// CharsetReader, if non-nil, defines a function to generate
+	// charset-conversion readers, converting from the provided
+	// non-UTF-8 charset into UTF-8. If CharsetReader is nil or
+	// returns an error, parsing stops with an error. One of the
+	// CharsetReader's result values must be non-nil.
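+	//
+	// One way to satisfy this hook, assuming the package
+	// golang.org/x/net/html/charset is available in the build, is its
+	// NewReaderLabel function, whose signature matches:
+	//
+	//	d := NewDecoder(r)
+	//	d.CharsetReader = charset.NewReaderLabel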
+	CharsetReader func(charset string, input io.Reader) (io.Reader, error)
+
+	// DefaultSpace sets the default name space used for unadorned tags,
+	// as if the entire XML stream were wrapped in an element containing
+	// the attribute xmlns="DefaultSpace".
+	DefaultSpace string
+
+	r              io.ByteReader
+	buf            bytes.Buffer
+	saved          *bytes.Buffer
+	stk            *stack
+	free           *stack
+	needClose      bool
+	toClose        Name
+	nextToken      Token
+	nextByte       int
+	ns             map[string]string
+	err            error
+	line           int
+	offset         int64
+	unmarshalDepth int
+}
+
+// NewDecoder creates a new XML parser reading from r.
+// If r does not implement io.ByteReader, NewDecoder will
+// do its own buffering.
+func NewDecoder(r io.Reader) *Decoder {
+	d := &Decoder{
+		ns:       make(map[string]string),
+		nextByte: -1,
+		line:     1,
+		Strict:   true,
+	}
+	d.switchToReader(r)
+	return d
+}
+
+// Token returns the next XML token in the input stream.
+// At the end of the input stream, Token returns nil, io.EOF.
+//
+// Slices of bytes in the returned token data refer to the
+// parser's internal buffer and remain valid only until the next
+// call to Token. To acquire a copy of the bytes, call CopyToken
+// or the token's Copy method.
+//
+// Token expands self-closing elements such as <br/>
+// into separate start and end elements returned by successive calls. +// +// Token guarantees that the StartElement and EndElement +// tokens it returns are properly nested and matched: +// if Token encounters an unexpected end element, +// it will return an error. +// +// Token implements XML name spaces as described by +// http://www.w3.org/TR/REC-xml-names/. Each of the +// Name structures contained in the Token has the Space +// set to the URL identifying its name space when known. +// If Token encounters an unrecognized name space prefix, +// it uses the prefix as the Space rather than report an error. +func (d *Decoder) Token() (t Token, err error) { + if d.stk != nil && d.stk.kind == stkEOF { + err = io.EOF + return + } + if d.nextToken != nil { + t = d.nextToken + d.nextToken = nil + } else if t, err = d.rawToken(); err != nil { + return + } + + if !d.Strict { + if t1, ok := d.autoClose(t); ok { + d.nextToken = t + t = t1 + } + } + switch t1 := t.(type) { + case StartElement: + // In XML name spaces, the translations listed in the + // attributes apply to the element name and + // to the other attribute names, so process + // the translations first. + for _, a := range t1.Attr { + if a.Name.Space == "xmlns" { + v, ok := d.ns[a.Name.Local] + d.pushNs(a.Name.Local, v, ok) + d.ns[a.Name.Local] = a.Value + } + if a.Name.Space == "" && a.Name.Local == "xmlns" { + // Default space for untagged names + v, ok := d.ns[""] + d.pushNs("", v, ok) + d.ns[""] = a.Value + } + } + + d.translate(&t1.Name, true) + for i := range t1.Attr { + d.translate(&t1.Attr[i].Name, false) + } + d.pushElement(t1.Name) + t = t1 + + case EndElement: + d.translate(&t1.Name, true) + if !d.popElement(&t1) { + return nil, d.err + } + t = t1 + } + return +} + +const xmlURL = "http://www.w3.org/XML/1998/namespace" + +// Apply name space translation to name n. +// The default name space (for Space=="") +// applies only to element names, not to attribute names. +func (d *Decoder) translate(n *Name, isElementName bool) { + switch { + case n.Space == "xmlns": + return + case n.Space == "" && !isElementName: + return + case n.Space == "xml": + n.Space = xmlURL + case n.Space == "" && n.Local == "xmlns": + return + } + if v, ok := d.ns[n.Space]; ok { + n.Space = v + } else if n.Space == "" { + n.Space = d.DefaultSpace + } +} + +func (d *Decoder) switchToReader(r io.Reader) { + // Get efficient byte at a time reader. + // Assume that if reader has its own + // ReadByte, it's efficient enough. + // Otherwise, use bufio. + if rb, ok := r.(io.ByteReader); ok { + d.r = rb + } else { + d.r = bufio.NewReader(r) + } +} + +// Parsing state - stack holds old name space translations +// and the current set of open elements. The translations to pop when +// ending a given tag are *below* it on the stack, which is +// more work but forced on us by XML. +type stack struct { + next *stack + kind int + name Name + ok bool +} + +const ( + stkStart = iota + stkNs + stkEOF +) + +func (d *Decoder) push(kind int) *stack { + s := d.free + if s != nil { + d.free = s.next + } else { + s = new(stack) + } + s.next = d.stk + s.kind = kind + d.stk = s + return s +} + +func (d *Decoder) pop() *stack { + s := d.stk + if s != nil { + d.stk = s.next + s.next = d.free + d.free = s + } + return s +} + +// Record that after the current element is finished +// (that element is already pushed on the stack) +// Token should return EOF until popEOF is called. +func (d *Decoder) pushEOF() { + // Walk down stack to find Start. 
+	// It might not be the top, because there might be stkNs
+	// entries above it.
+	start := d.stk
+	for start.kind != stkStart {
+		start = start.next
+	}
+	// The stkNs entries below a start are associated with that
+	// element too; skip over them.
+	for start.next != nil && start.next.kind == stkNs {
+		start = start.next
+	}
+	s := d.free
+	if s != nil {
+		d.free = s.next
+	} else {
+		s = new(stack)
+	}
+	s.kind = stkEOF
+	s.next = start.next
+	start.next = s
+}
+
+// Undo a pushEOF.
+// The element must have been finished, so the EOF should be at the top of the stack.
+func (d *Decoder) popEOF() bool {
+	if d.stk == nil || d.stk.kind != stkEOF {
+		return false
+	}
+	d.pop()
+	return true
+}
+
+// Record that we are starting an element with the given name.
+func (d *Decoder) pushElement(name Name) {
+	s := d.push(stkStart)
+	s.name = name
+}
+
+// Record that we are changing the value of ns[local].
+// The old value is url, ok.
+func (d *Decoder) pushNs(local string, url string, ok bool) {
+	s := d.push(stkNs)
+	s.name.Local = local
+	s.name.Space = url
+	s.ok = ok
+}
+
+// Creates a SyntaxError with the current line number.
+func (d *Decoder) syntaxError(msg string) error {
+	return &SyntaxError{Msg: msg, Line: d.line}
+}
+
+// Record that we are ending an element with the given name.
+// The name must match the record at the top of the stack,
+// which must be a pushElement record.
+// After popping the element, apply any undo records from
+// the stack to restore the name translations that existed
+// before we saw this element.
+func (d *Decoder) popElement(t *EndElement) bool {
+	s := d.pop()
+	name := t.Name
+	switch {
+	case s == nil || s.kind != stkStart:
+		d.err = d.syntaxError("unexpected end element </" + name.Local + ">")
+		return false
+	case s.name.Local != name.Local:
+		if !d.Strict {
+			d.needClose = true
+			d.toClose = t.Name
+			t.Name = s.name
+			return true
+		}
+		d.err = d.syntaxError("element <" + s.name.Local + "> closed by </" + name.Local + ">")
+		return false
+	case s.name.Space != name.Space:
+		d.err = d.syntaxError("element <" + s.name.Local + "> in space " + s.name.Space +
+			"closed by </" + name.Local + "> in space " + name.Space)
+		return false
+	}
+
+	// Pop stack until a Start or EOF is on the top, undoing the
+	// translations that were associated with the element we just closed.
+	for d.stk != nil && d.stk.kind != stkStart && d.stk.kind != stkEOF {
+		s := d.pop()
+		if s.ok {
+			d.ns[s.name.Local] = s.name.Space
+		} else {
+			delete(d.ns, s.name.Local)
+		}
+	}
+
+	return true
+}
+
+// If the top element on the stack is autoclosing and
+// t is not the end tag, invent the end tag.
+func (d *Decoder) autoClose(t Token) (Token, bool) {
+	if d.stk == nil || d.stk.kind != stkStart {
+		return nil, false
+	}
+	name := strings.ToLower(d.stk.name.Local)
+	for _, s := range d.AutoClose {
+		if strings.ToLower(s) == name {
+			// This one should be auto closed if t doesn't close it.
+			et, ok := t.(EndElement)
+			if !ok || et.Name.Local != name {
+				return EndElement{d.stk.name}, true
+			}
+			break
+		}
+	}
+	return nil, false
+}
+
+var errRawToken = errors.New("xml: cannot use RawToken from UnmarshalXML method")
+
+// RawToken is like Token but does not verify that
+// start and end elements match and does not translate
+// name space prefixes to their corresponding URLs.
+func (d *Decoder) RawToken() (Token, error) { + if d.unmarshalDepth > 0 { + return nil, errRawToken + } + return d.rawToken() +} + +func (d *Decoder) rawToken() (Token, error) { + if d.err != nil { + return nil, d.err + } + if d.needClose { + // The last element we read was self-closing and + // we returned just the StartElement half. + // Return the EndElement half now. + d.needClose = false + return EndElement{d.toClose}, nil + } + + b, ok := d.getc() + if !ok { + return nil, d.err + } + + if b != '<' { + // Text section. + d.ungetc(b) + data := d.text(-1, false) + if data == nil { + return nil, d.err + } + return CharData(data), nil + } + + if b, ok = d.mustgetc(); !ok { + return nil, d.err + } + switch b { + case '/': + // ' { + d.err = d.syntaxError("invalid characters between ") + return nil, d.err + } + return EndElement{name}, nil + + case '?': + // ' { + break + } + b0 = b + } + data := d.buf.Bytes() + data = data[0 : len(data)-2] // chop ?> + + if target == "xml" { + content := string(data) + ver := procInst("version", content) + if ver != "" && ver != "1.0" { + d.err = fmt.Errorf("xml: unsupported version %q; only version 1.0 is supported", ver) + return nil, d.err + } + enc := procInst("encoding", content) + if enc != "" && enc != "utf-8" && enc != "UTF-8" { + if d.CharsetReader == nil { + d.err = fmt.Errorf("xml: encoding %q declared but Decoder.CharsetReader is nil", enc) + return nil, d.err + } + newr, err := d.CharsetReader(enc, d.r.(io.Reader)) + if err != nil { + d.err = fmt.Errorf("xml: opening charset %q: %v", enc, err) + return nil, d.err + } + if newr == nil { + panic("CharsetReader returned a nil Reader for charset " + enc) + } + d.switchToReader(newr) + } + } + return ProcInst{target, data}, nil + + case '!': + // ' { + break + } + b0, b1 = b1, b + } + data := d.buf.Bytes() + data = data[0 : len(data)-3] // chop --> + return Comment(data), nil + + case '[': // . + data := d.text(-1, true) + if data == nil { + return nil, d.err + } + return CharData(data), nil + } + + // Probably a directive: , , etc. + // We don't care, but accumulate for caller. Quoted angle + // brackets do not count for nesting. + d.buf.Reset() + d.buf.WriteByte(b) + inquote := uint8(0) + depth := 0 + for { + if b, ok = d.mustgetc(); !ok { + return nil, d.err + } + if inquote == 0 && b == '>' && depth == 0 { + break + } + HandleB: + d.buf.WriteByte(b) + switch { + case b == inquote: + inquote = 0 + + case inquote != 0: + // in quotes, no special action + + case b == '\'' || b == '"': + inquote = b + + case b == '>' && inquote == 0: + depth-- + + case b == '<' && inquote == 0: + // Look for